author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-11-25 14:45:37 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-11-25 14:48:03 +0000
commit    e55403ed71282d7bfd8b56df219de3c28a8af064 (patch)
tree      524889e5becb81643bf8741e3082955dca076f09 /src/collectors
parent    Releasing debian version 1.47.5-1. (diff)
Merging upstream version 2.0.3+dfsg:
- does not include dygraphs anymore (Closes: #923993)
- does not include pako anymore (Closes: #1042533)
- does not include dashboard binaries anymore (Closes: #1045145)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/collectors')
-rw-r--r--  src/collectors/COLLECTORS.md | 42
-rw-r--r--  src/collectors/README.md | 82
-rw-r--r--  src/collectors/REFERENCE.md | 99
-rw-r--r--  src/collectors/all.h | 55
-rw-r--r--  src/collectors/apps.plugin/README.md | 450
-rw-r--r--  src/collectors/apps.plugin/apps_aggregations.c | 250
-rw-r--r--  src/collectors/apps.plugin/apps_functions.c | 395
-rw-r--r--  src/collectors/apps.plugin/apps_groups.conf | 521
-rw-r--r--  src/collectors/apps.plugin/apps_incremental_collection.c | 187
-rw-r--r--  src/collectors/apps.plugin/apps_os_freebsd.c | 368
-rw-r--r--  src/collectors/apps.plugin/apps_os_linux.c | 770
-rw-r--r--  src/collectors/apps.plugin/apps_os_macos.c | 334
-rw-r--r--  src/collectors/apps.plugin/apps_os_windows.c | 1011
-rw-r--r--  src/collectors/apps.plugin/apps_os_windows_nt.c | 41
-rw-r--r--  src/collectors/apps.plugin/apps_output.c | 346
-rw-r--r--  src/collectors/apps.plugin/apps_pid.c | 927
-rw-r--r--  src/collectors/apps.plugin/apps_pid_files.c | 450
-rw-r--r--  src/collectors/apps.plugin/apps_pid_match.c | 90
-rw-r--r--  src/collectors/apps.plugin/apps_plugin.c | 722
-rw-r--r--  src/collectors/apps.plugin/apps_plugin.h | 771
-rw-r--r--  src/collectors/apps.plugin/apps_proc_meminfo.c | 68
-rw-r--r--  src/collectors/apps.plugin/apps_proc_pid_cmdline.c | 130
-rw-r--r--  src/collectors/apps.plugin/apps_proc_pid_fd.c | 753
-rw-r--r--  src/collectors/apps.plugin/apps_proc_pid_io.c | 95
-rw-r--r--  src/collectors/apps.plugin/apps_proc_pid_limits.c | 151
-rw-r--r--  src/collectors/apps.plugin/apps_proc_pid_stat.c | 293
-rw-r--r--  src/collectors/apps.plugin/apps_proc_pid_status.c | 192
-rw-r--r--  src/collectors/apps.plugin/apps_proc_pids.c | 720
-rw-r--r--  src/collectors/apps.plugin/apps_proc_stat.c | 154
-rw-r--r--  src/collectors/apps.plugin/apps_targets.c | 464
-rw-r--r--  src/collectors/apps.plugin/apps_users_and_groups.c | 206
-rw-r--r--  src/collectors/apps.plugin/busy_threads.c | 76
-rw-r--r--  src/collectors/cgroups.plugin/README.md | 9
-rw-r--r--  src/collectors/cgroups.plugin/cgroup-discovery.c | 6
-rw-r--r--  src/collectors/cgroups.plugin/cgroup-internals.h | 8
-rwxr-xr-x  src/collectors/cgroups.plugin/cgroup-name.sh.in | 4
-rw-r--r--  src/collectors/cgroups.plugin/cgroup-network.c | 257
-rw-r--r--  src/collectors/cgroups.plugin/cgroup-top.c | 4
-rw-r--r--  src/collectors/cgroups.plugin/sys_fs_cgroup.c | 37
-rw-r--r--  src/collectors/charts.d.plugin/README.md | 9
l---------  src/collectors/charts.d.plugin/apcupsd/README.md | 1
-rw-r--r--  src/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh | 306
-rw-r--r--  src/collectors/charts.d.plugin/apcupsd/apcupsd.conf | 25
-rw-r--r--  src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md | 237
-rw-r--r--  src/collectors/charts.d.plugin/apcupsd/metadata.yaml | 256
-rw-r--r--  src/collectors/charts.d.plugin/example/README.md | 9
-rw-r--r--  src/collectors/charts.d.plugin/libreswan/integrations/libreswan.md | 6
-rw-r--r--  src/collectors/charts.d.plugin/opensips/integrations/opensips.md | 6
l---------  src/collectors/charts.d.plugin/sensors/README.md | 1
-rw-r--r--  src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md | 235
-rw-r--r--  src/collectors/charts.d.plugin/sensors/metadata.yaml | 182
-rw-r--r--  src/collectors/charts.d.plugin/sensors/sensors.chart.sh | 250
-rw-r--r--  src/collectors/charts.d.plugin/sensors/sensors.conf | 32
-rw-r--r--  src/collectors/checks.plugin/README.md | 12
-rw-r--r--  src/collectors/common-contexts/common-contexts.h | 26
-rw-r--r--  src/collectors/common-contexts/disk-avgsz.h | 44
-rw-r--r--  src/collectors/common-contexts/disk-await.h | 44
-rw-r--r--  src/collectors/common-contexts/disk-busy.h | 41
-rw-r--r--  src/collectors/common-contexts/disk-io.h (renamed from src/collectors/common-contexts/disk.io.h) | 0
-rw-r--r--  src/collectors/common-contexts/disk-iotime.h | 44
-rw-r--r--  src/collectors/common-contexts/disk-ops.h | 44
-rw-r--r--  src/collectors/common-contexts/disk-qops.h | 41
-rw-r--r--  src/collectors/common-contexts/disk-svctm.h | 41
-rw-r--r--  src/collectors/common-contexts/disk-util.h | 41
-rw-r--r--  src/collectors/common-contexts/mem-available.h (renamed from src/collectors/common-contexts/mem.available.h) | 0
-rw-r--r--  src/collectors/common-contexts/mem-pgfaults.h (renamed from src/collectors/common-contexts/mem.pgfaults.h) | 2
-rw-r--r--  src/collectors/common-contexts/mem-swap.h (renamed from src/collectors/common-contexts/mem.swap.h) | 2
-rw-r--r--  src/collectors/common-contexts/system-interrupts.h (renamed from src/collectors/common-contexts/system.interrupts.h) | 2
-rw-r--r--  src/collectors/common-contexts/system-io.h (renamed from src/collectors/common-contexts/system.io.h) | 0
-rw-r--r--  src/collectors/common-contexts/system-ipc.h (renamed from src/collectors/common-contexts/system.ipc.h) | 0
-rw-r--r--  src/collectors/common-contexts/system-processes.h (renamed from src/collectors/common-contexts/system.processes.h) | 0
-rw-r--r--  src/collectors/common-contexts/system-ram.h (renamed from src/collectors/common-contexts/system.ram.h) | 0
-rw-r--r--  src/collectors/cups.plugin/cups_plugin.c | 6
-rw-r--r--  src/collectors/cups.plugin/integrations/cups.md | 4
-rw-r--r--  src/collectors/debugfs.plugin/debugfs_extfrag.c | 31
-rw-r--r--  src/collectors/debugfs.plugin/debugfs_plugin.c | 6
-rw-r--r--  src/collectors/debugfs.plugin/integrations/linux_zswap.md | 4
-rw-r--r--  src/collectors/debugfs.plugin/integrations/power_capping.md | 4
-rw-r--r--  src/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md | 4
-rw-r--r--  src/collectors/diskspace.plugin/integrations/disk_space.md | 6
-rw-r--r--  src/collectors/diskspace.plugin/metadata.yaml | 2
-rw-r--r--  src/collectors/diskspace.plugin/plugin_diskspace.c | 22
-rw-r--r--  src/collectors/ebpf.plugin/README.md | 468
-rw-r--r--  src/collectors/ebpf.plugin/ebpf.c | 59
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_apps.c | 11
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_apps.h | 2
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_cachestat.c | 24
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_cgroup.c | 5
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_dcstat.c | 24
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_disk.c | 10
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_fd.c | 25
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_filesystem.c | 12
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_functions.c | 6
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_hardirq.c | 12
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_mdflush.c | 12
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_mount.c | 10
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_oomkill.c | 12
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_process.c | 14
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_shm.c | 27
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_socket.c | 37
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_socket.h | 4
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_softirq.c | 10
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_swap.c | 27
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_sync.c | 12
-rw-r--r--  src/collectors/ebpf.plugin/ebpf_vfs.c | 20
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_disk.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_mount.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_processes.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_shm.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_socket.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_softirq.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_swap.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_sync.md | 4
-rw-r--r--  src/collectors/ebpf.plugin/integrations/ebpf_vfs.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/README.md | 11
-rw-r--r--  src/collectors/freebsd.plugin/freebsd_devstat.c | 14
-rw-r--r--  src/collectors/freebsd.plugin/freebsd_getifaddrs.c | 10
-rw-r--r--  src/collectors/freebsd.plugin/freebsd_ipfw.c | 1
-rw-r--r--  src/collectors/freebsd.plugin/freebsd_sysctl.c | 27
-rw-r--r--  src/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/devstat.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/getifaddrs.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/getmntinfo.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/hw.intrcnt.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/ipfw.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/kern.cp_time.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/kern.ipc.msq.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/kern.ipc.sem.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/kern.ipc.shm.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/net.isr.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/system.ram.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/uptime.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/vm.loadavg.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/vm.swap_info.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/vm.vmtotal.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/integrations/zfs.md | 4
-rw-r--r--  src/collectors/freebsd.plugin/plugin_freebsd.c | 5
-rw-r--r--  src/collectors/freeipmi.plugin/freeipmi_plugin.c | 23
-rw-r--r--  src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md | 4
-rw-r--r--  src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md | 6
-rw-r--r--  src/collectors/idlejitter.plugin/metadata.yaml | 4
-rw-r--r--  src/collectors/idlejitter.plugin/plugin_idlejitter.c | 4
-rw-r--r--  src/collectors/ioping.plugin/integrations/ioping.md | 4
-rw-r--r--  src/collectors/log2journal/README.md | 37
-rw-r--r--  src/collectors/log2journal/log2journal-hashed-key.h | 80
-rw-r--r--  src/collectors/log2journal/log2journal-help.c | 2
-rw-r--r--  src/collectors/log2journal/log2journal-inject.c | 11
-rw-r--r--  src/collectors/log2journal/log2journal-params.c | 58
-rw-r--r--  src/collectors/log2journal/log2journal-pattern.c | 4
-rw-r--r--  src/collectors/log2journal/log2journal-pcre2.c | 9
-rw-r--r--  src/collectors/log2journal/log2journal-rename.c | 6
-rw-r--r--  src/collectors/log2journal/log2journal-replace.c | 12
-rw-r--r--  src/collectors/log2journal/log2journal-rewrite.c | 7
-rw-r--r--  src/collectors/log2journal/log2journal-txt.h | 90
-rw-r--r--  src/collectors/log2journal/log2journal-yaml.c | 301
-rw-r--r--  src/collectors/log2journal/log2journal.c | 64
-rw-r--r--  src/collectors/log2journal/log2journal.h | 251
-rw-r--r--  src/collectors/macos.plugin/integrations/macos.md | 4
-rw-r--r--  src/collectors/macos.plugin/macos_fw.c | 10
-rw-r--r--  src/collectors/macos.plugin/macos_mach_smi.c | 1
-rw-r--r--  src/collectors/macos.plugin/macos_sysctl.c | 11
-rw-r--r--  src/collectors/macos.plugin/plugin_macos.c | 5
-rw-r--r--  src/collectors/network-viewer.plugin/network-viewer.c | 48
-rw-r--r--  src/collectors/nfacct.plugin/integrations/netfilter.md | 4
-rw-r--r--  src/collectors/nfacct.plugin/plugin_nfacct.c | 6
-rw-r--r--  src/collectors/perf.plugin/integrations/cpu_performance.md | 10
-rw-r--r--  src/collectors/perf.plugin/metadata.yaml | 2
-rw-r--r--  src/collectors/perf.plugin/perf_plugin.c | 49
-rw-r--r--  src/collectors/plugins.d/README.md | 875
-rw-r--r--  src/collectors/plugins.d/functions-table.md | 418
-rw-r--r--  src/collectors/plugins.d/gperf-config.txt | 112
-rw-r--r--  src/collectors/plugins.d/gperf-hashtable.h | 237
-rw-r--r--  src/collectors/plugins.d/plugins_d.c | 350
-rw-r--r--  src/collectors/plugins.d/plugins_d.h | 54
-rw-r--r--  src/collectors/plugins.d/pluginsd_dyncfg.c | 69
-rw-r--r--  src/collectors/plugins.d/pluginsd_dyncfg.h | 11
-rw-r--r--  src/collectors/plugins.d/pluginsd_functions.c | 412
-rw-r--r--  src/collectors/plugins.d/pluginsd_functions.h | 48
-rw-r--r--  src/collectors/plugins.d/pluginsd_internals.c | 120
-rw-r--r--  src/collectors/plugins.d/pluginsd_internals.h | 355
-rw-r--r--  src/collectors/plugins.d/pluginsd_parser.c | 1402
-rw-r--r--  src/collectors/plugins.d/pluginsd_parser.h | 244
-rw-r--r--  src/collectors/plugins.d/pluginsd_replication.c | 371
-rw-r--r--  src/collectors/plugins.d/pluginsd_replication.h | 14
-rw-r--r--  src/collectors/proc.plugin/README.md | 417
-rw-r--r--  src/collectors/proc.plugin/integrations/system_statistics.md | 4
-rw-r--r--  src/collectors/proc.plugin/ipc.c | 8
-rw-r--r--  src/collectors/proc.plugin/plugin_proc.c | 7
-rw-r--r--  src/collectors/proc.plugin/proc_diskstats.c | 422
-rw-r--r--  src/collectors/proc.plugin/proc_mdstat.c | 2
-rw-r--r--  src/collectors/proc.plugin/proc_meminfo.c | 19
-rw-r--r--  src/collectors/proc.plugin/proc_net_dev.c | 34
-rw-r--r--  src/collectors/proc.plugin/proc_net_netstat.c | 25
-rw-r--r--  src/collectors/proc.plugin/proc_net_rpc_nfs.c | 3
-rw-r--r--  src/collectors/proc.plugin/proc_net_rpc_nfsd.c | 3
-rw-r--r--  src/collectors/proc.plugin/proc_net_sctp_snmp.c | 4
-rw-r--r--  src/collectors/proc.plugin/proc_net_sockstat.c | 2
-rw-r--r--  src/collectors/proc.plugin/proc_net_stat_conntrack.c | 6
-rw-r--r--  src/collectors/proc.plugin/proc_net_wireless.c | 11
-rw-r--r--  src/collectors/proc.plugin/proc_pressure.c | 5
-rw-r--r--  src/collectors/proc.plugin/proc_spl_kstat_zfs.c | 2
-rw-r--r--  src/collectors/proc.plugin/proc_stat.c | 7
-rw-r--r--  src/collectors/proc.plugin/proc_uptime.c | 2
-rw-r--r--  src/collectors/proc.plugin/proc_vmstat.c | 4
-rw-r--r--  src/collectors/proc.plugin/sys_class_drm.c | 2
-rw-r--r--  src/collectors/proc.plugin/sys_class_infiniband.c | 17
-rw-r--r--  src/collectors/proc.plugin/sys_class_power_supply.c | 2
-rw-r--r--  src/collectors/proc.plugin/sys_devices_pci_aer.c | 2
-rw-r--r--  src/collectors/proc.plugin/sys_devices_system_edac_mc.c | 2
-rw-r--r--  src/collectors/proc.plugin/sys_devices_system_node.c | 322
-rw-r--r--  src/collectors/proc.plugin/sys_fs_btrfs.c | 16
-rw-r--r--  src/collectors/profile.plugin/README.md | 24
-rw-r--r--  src/collectors/profile.plugin/plugin_profile.cc | 10
-rw-r--r--  src/collectors/python.d.plugin/README.md | 35
-rw-r--r--  src/collectors/python.d.plugin/am2320/integrations/am2320.md | 4
-rw-r--r--  src/collectors/python.d.plugin/anomalies/README.md | 248
-rw-r--r--  src/collectors/python.d.plugin/anomalies/anomalies.chart.py | 425
-rw-r--r--  src/collectors/python.d.plugin/anomalies/anomalies.conf | 184
-rw-r--r--  src/collectors/python.d.plugin/anomalies/metadata.yaml | 87
l---------  src/collectors/python.d.plugin/boinc/README.md | 1
-rw-r--r--  src/collectors/python.d.plugin/boinc/boinc.chart.py | 168
-rw-r--r--  src/collectors/python.d.plugin/boinc/boinc.conf | 66
-rw-r--r--  src/collectors/python.d.plugin/boinc/integrations/boinc.md | 238
-rw-r--r--  src/collectors/python.d.plugin/boinc/metadata.yaml | 198
l---------  src/collectors/python.d.plugin/ceph/README.md | 1
-rw-r--r--  src/collectors/python.d.plugin/ceph/ceph.chart.py | 374
-rw-r--r--  src/collectors/python.d.plugin/ceph/ceph.conf | 75
-rw-r--r--  src/collectors/python.d.plugin/ceph/integrations/ceph.md | 228
-rw-r--r--  src/collectors/python.d.plugin/ceph/metadata.yaml | 223
-rw-r--r--  src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md | 14
-rw-r--r--  src/collectors/python.d.plugin/go_expvar/metadata.yaml | 6
-rw-r--r--  src/collectors/python.d.plugin/haproxy/README.md | 9
l---------  src/collectors/python.d.plugin/openldap/README.md | 1
-rw-r--r--  src/collectors/python.d.plugin/openldap/integrations/openldap.md | 249
-rw-r--r--  src/collectors/python.d.plugin/openldap/metadata.yaml | 225
-rw-r--r--  src/collectors/python.d.plugin/openldap/openldap.chart.py | 216
-rw-r--r--  src/collectors/python.d.plugin/openldap/openldap.conf | 75
l---------  src/collectors/python.d.plugin/oracledb/README.md | 1
-rw-r--r--  src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md | 260
-rw-r--r--  src/collectors/python.d.plugin/oracledb/metadata.yaml | 309
-rw-r--r--  src/collectors/python.d.plugin/oracledb/oracledb.chart.py | 846
-rw-r--r--  src/collectors/python.d.plugin/oracledb/oracledb.conf | 88
-rw-r--r--  src/collectors/python.d.plugin/pandas/integrations/pandas.md | 8
-rw-r--r--  src/collectors/python.d.plugin/python.d.conf | 17
-rw-r--r--  src/collectors/python.d.plugin/python.d.plugin.in | 3
-rw-r--r--  src/collectors/python.d.plugin/python_modules/bases/loaders.py | 14
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py | 316
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/composer.py | 140
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py | 676
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py | 86
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py | 63
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py | 1141
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/error.py | 76
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/events.py | 87
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/loader.py | 41
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py | 50
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/parser.py | 590
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/reader.py | 191
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/representer.py | 485
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py | 225
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py | 1458
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py | 112
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py | 105
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py | 313
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/composer.py | 140
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py | 687
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py | 86
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py | 63
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py | 1138
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/error.py | 76
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/events.py | 87
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/loader.py | 41
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py | 50
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/parser.py | 590
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/reader.py | 193
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/representer.py | 375
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py | 225
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py | 1449
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py | 112
-rw-r--r--  src/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py | 105
-rw-r--r--  src/collectors/python.d.plugin/python_modules/third_party/boinc_client.py | 515
-rw-r--r--  src/collectors/python.d.plugin/python_modules/third_party/mcrcon.py | 74
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/__init__.py | 98
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/_collections.py | 320
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/connection.py | 374
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py | 900
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py | 0
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py | 0
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py | 591
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py | 344
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py | 297
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py | 113
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py | 458
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py | 808
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py | 189
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/exceptions.py | 247
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/fields.py | 179
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/filepost.py | 95
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py | 5
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py | 0
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py | 54
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py | 260
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py | 852
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py | 20
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py | 156
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py | 441
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/request.py | 149
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/response.py | 623
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py | 55
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/connection.py | 131
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/request.py | 119
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/response.py | 82
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/retry.py | 402
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py | 588
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py | 338
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py | 243
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/url.py | 231
-rw-r--r--  src/collectors/python.d.plugin/python_modules/urllib3/util/wait.py | 41
l---------  src/collectors/python.d.plugin/samba/README.md | 1
-rw-r--r--  src/collectors/python.d.plugin/samba/integrations/samba.md | 255
-rw-r--r--  src/collectors/python.d.plugin/samba/metadata.yaml | 205
-rw-r--r--  src/collectors/python.d.plugin/samba/samba.chart.py | 144
-rw-r--r--  src/collectors/python.d.plugin/samba/samba.conf | 60
l---------  src/collectors/python.d.plugin/spigotmc/README.md | 1
-rw-r--r--  src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md | 250
-rw-r--r--  src/collectors/python.d.plugin/spigotmc/metadata.yaml | 176
-rw-r--r--  src/collectors/python.d.plugin/spigotmc/spigotmc.chart.py | 184
-rw-r--r--  src/collectors/python.d.plugin/spigotmc/spigotmc.conf | 66
-rw-r--r--  src/collectors/python.d.plugin/traefik/README.md | 9
l---------  src/collectors/python.d.plugin/varnish/README.md | 1
-rw-r--r--  src/collectors/python.d.plugin/varnish/integrations/varnish.md | 247
-rw-r--r--  src/collectors/python.d.plugin/varnish/metadata.yaml | 253
-rw-r--r--  src/collectors/python.d.plugin/varnish/varnish.chart.py | 385
-rw-r--r--  src/collectors/python.d.plugin/varnish/varnish.conf | 66
l---------  src/collectors/python.d.plugin/w1sensor/README.md | 1
-rw-r--r--  src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md | 201
-rw-r--r--  src/collectors/python.d.plugin/w1sensor/metadata.yaml | 119
-rw-r--r--  src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py | 97
-rw-r--r--  src/collectors/python.d.plugin/w1sensor/w1sensor.conf | 72
l---------  src/collectors/python.d.plugin/zscores/README.md | 1
-rw-r--r--  src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md | 229
-rw-r--r--  src/collectors/python.d.plugin/zscores/metadata.yaml | 187
-rw-r--r--  src/collectors/python.d.plugin/zscores/zscores.chart.py | 146
-rw-r--r--  src/collectors/python.d.plugin/zscores/zscores.conf | 108
-rw-r--r--  src/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md | 4
-rw-r--r--  src/collectors/slabinfo.plugin/slabinfo.c | 9
-rw-r--r--  src/collectors/statsd.plugin/README.md | 23
-rw-r--r--  src/collectors/statsd.plugin/asterisk.md | 8
-rw-r--r--  src/collectors/statsd.plugin/k6.md | 8
-rw-r--r--  src/collectors/statsd.plugin/statsd.c | 38
-rw-r--r--  src/collectors/systemd-journal.plugin/README.md | 7
-rw-r--r--  src/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md | 2
-rw-r--r--  src/collectors/systemd-journal.plugin/forward_secure_sealing.md | 7
-rw-r--r--  src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md | 4
-rw-r--r--  src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md | 11
-rw-r--r--  src/collectors/systemd-journal.plugin/systemd-internals.h | 2
-rw-r--r--  src/collectors/systemd-journal.plugin/systemd-journal-annotations.c | 30
-rw-r--r--  src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c | 4
-rw-r--r--  src/collectors/systemd-journal.plugin/systemd-journal-files.c | 88
-rw-r--r--  src/collectors/systemd-journal.plugin/systemd-journal-sampling.h | 378
-rw-r--r--  src/collectors/systemd-journal.plugin/systemd-journal-watcher.c | 2
-rw-r--r--  src/collectors/systemd-journal.plugin/systemd-journal.c | 1546
-rw-r--r--  src/collectors/systemd-journal.plugin/systemd-main.c | 11
-rw-r--r--  src/collectors/systemd-journal.plugin/systemd-units.c | 17
-rw-r--r--  src/collectors/tc.plugin/integrations/tc_qos_classes.md | 8
-rw-r--r--  src/collectors/tc.plugin/metadata.yaml | 4
-rw-r--r--  src/collectors/tc.plugin/plugin_tc.c | 4
-rw-r--r--  src/collectors/timex.plugin/integrations/timex.md | 4
-rw-r--r--  src/collectors/timex.plugin/plugin_timex.c | 10
-rw-r--r--  src/collectors/utils/local_listeners.c (renamed from src/collectors/plugins.d/local_listeners.c) | 147
-rw-r--r--  src/collectors/utils/ndsudo.c (renamed from src/collectors/plugins.d/ndsudo.c) | 36
-rw-r--r--  src/collectors/windows-events.plugin/README.md | 289
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-fields-cache.c | 158
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-fields-cache.h | 22
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-providers.c | 678
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-providers.h | 41
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-query-builder.c | 107
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-query-builder.h | 10
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-query-evt-variant.c | 354
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-query.c | 717
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-query.h | 296
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-sources.c | 644
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-sources.h | 78
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-unicode.c | 46
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-unicode.h | 42
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-xml.c | 344
-rw-r--r--  src/collectors/windows-events.plugin/windows-events-xml.h | 12
-rw-r--r--  src/collectors/windows-events.plugin/windows-events.c | 1402
-rw-r--r--  src/collectors/windows-events.plugin/windows-events.h | 262
-rw-r--r--  src/collectors/windows.plugin/GetSystemUptime.c | 68
-rw-r--r--  src/collectors/windows.plugin/integrations/memory_statistics.md | 123
-rw-r--r--  src/collectors/windows.plugin/integrations/system_statistics.md | 123
-rw-r--r--  src/collectors/windows.plugin/integrations/system_thermal_zone.md | 121
-rw-r--r--  src/collectors/windows.plugin/metadata.yaml | 276
-rw-r--r--  src/collectors/windows.plugin/metdata.yaml | 92
-rw-r--r--  src/collectors/windows.plugin/perflib-dump.c | 529
-rw-r--r--  src/collectors/windows.plugin/perflib-hyperv.c | 1793
-rw-r--r--  src/collectors/windows.plugin/perflib-memory.c | 284
-rw-r--r--  src/collectors/windows.plugin/perflib-mssql.c | 1413
-rw-r--r--  src/collectors/windows.plugin/perflib-names.c | 242
-rw-r--r--  src/collectors/windows.plugin/perflib-netframework.c | 796
-rw-r--r--  src/collectors/windows.plugin/perflib-network.c | 1500
-rw-r--r--  src/collectors/windows.plugin/perflib-objects.c | 94
-rw-r--r--  src/collectors/windows.plugin/perflib-processes.c | 116
-rw-r--r--  src/collectors/windows.plugin/perflib-processor.c | 410
-rw-r--r--  src/collectors/windows.plugin/perflib-rrd.c | 822
-rw-r--r--  src/collectors/windows.plugin/perflib-rrd.h | 23
-rw-r--r--  src/collectors/windows.plugin/perflib-storage.c | 949
-rw-r--r--  src/collectors/windows.plugin/perflib-thermalzone.c | 103
-rw-r--r--  src/collectors/windows.plugin/perflib-web-service.c | 669
-rw-r--r--  src/collectors/windows.plugin/perflib.c | 671
-rw-r--r--  src/collectors/windows.plugin/perflib.h | 72
-rw-r--r--  src/collectors/windows.plugin/windows-internals.h | 35
-rw-r--r--  src/collectors/windows.plugin/windows_plugin.c | 33
-rw-r--r--  src/collectors/windows.plugin/windows_plugin.h | 77
-rw-r--r--  src/collectors/xenstat.plugin/integrations/xen_xcp-ng.md | 4
-rw-r--r--  src/collectors/xenstat.plugin/xenstat_plugin.c | 7
437 files changed, 23514 insertions, 48433 deletions
diff --git a/src/collectors/COLLECTORS.md b/src/collectors/COLLECTORS.md
index 608649a38..f5aa095e7 100644
--- a/src/collectors/COLLECTORS.md
+++ b/src/collectors/COLLECTORS.md
@@ -23,8 +23,6 @@ If you don't see the app/service you'd like to monitor in this list:
- If you don't see the collector there, you can make a [feature request](https://github.com/netdata/netdata/issues/new/choose) on GitHub.
-- If you have basic software development skills, you can add your own plugin in [Go](/src/go/plugin/go.d/README.md#how-to-develop-a-collector) or [Python](/docs/developer-and-contributor-corner/python-collector.md)
-
## Available Data Collection Integrations
<!-- AUTOGENERATED PART BY integrations/gen_doc_collector_page.py SCRIPT, DO NOT EDIT MANUALLY -->
### APM
@@ -77,7 +75,7 @@ If you don't see the app/service you'd like to monitor in this list:
- [OpenLDAP (community)](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md)
-- [OpenLDAP](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/openldap/integrations/openldap.md)
+- [OpenLDAP](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/openldap/integrations/openldap.md)
- [RADIUS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/radius.md)
@@ -215,6 +213,8 @@ If you don't see the app/service you'd like to monitor in this list:
- [AWS RDS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md)
+- [BOINC](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/boinc/integrations/boinc.md)
+
- [Cassandra](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md)
- [ClickHouse](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md)
@@ -237,6 +237,8 @@ If you don't see the app/service you'd like to monitor in this list:
- [MariaDB](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md)
+- [MaxScale](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/maxscale/integrations/maxscale.md)
+
- [Memcached (community)](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md)
- [Memcached](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/memcached/integrations/memcached.md)
@@ -249,7 +251,7 @@ If you don't see the app/service you'd like to monitor in this list:
- [Oracle DB (community)](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md)
-- [Oracle DB](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md)
+- [Oracle DB](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/oracledb/integrations/oracle_db.md)
- [Patroni](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md)
@@ -281,8 +283,6 @@ If you don't see the app/service you'd like to monitor in this list:
### Distributed Computing Systems
-- [BOINC](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/boinc/integrations/boinc.md)
-
- [Gearman](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/gearman/integrations/gearman.md)
### DNS and DHCP Servers
@@ -429,7 +429,7 @@ If you don't see the app/service you'd like to monitor in this list:
- [OpenRCT2](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md)
-- [SpigotMC](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md)
+- [SpigotMC](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/spigotmc/integrations/spigotmc.md)
- [Steam](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/steam.md)
@@ -459,7 +459,7 @@ If you don't see the app/service you'd like to monitor in this list:
### Hardware Devices and Sensors
-- [1-Wire Sensors](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md)
+- [1-Wire Sensors](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/w1sensor/integrations/1-wire_sensors.md)
- [AM2320](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/am2320/integrations/am2320.md)
@@ -485,9 +485,7 @@ If you don't see the app/service you'd like to monitor in this list:
- [Intelligent Platform Management Interface (IPMI)](https://github.com/netdata/netdata/blob/master/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md)
-- [Linux Sensors (lm-sensors)](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md)
-
-- [Linux Sensors (sysfs)](https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md)
+- [Linux Sensors](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors.md)
- [NVML](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md)
@@ -891,8 +889,6 @@ If you don't see the app/service you'd like to monitor in this list:
- [GitHub repository](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md)
-- [python.d zscores](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md)
-
### Processes and System Services
- [Applications](https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/integrations/applications.md)
@@ -923,6 +919,8 @@ If you don't see the app/service you'd like to monitor in this list:
- [Sphinx](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md)
+- [Typesense](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/typesense/integrations/typesense.md)
+
### Security Systems
- [Certificate Transparency](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md)
@@ -967,7 +965,7 @@ If you don't see the app/service you'd like to monitor in this list:
- [CVMFS clients](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md)
-- [Ceph](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/ceph/integrations/ceph.md)
+- [Ceph](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/ceph/integrations/ceph.md)
- [DMCache devices](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md)
@@ -1009,7 +1007,7 @@ If you don't see the app/service you'd like to monitor in this list:
- [Netapp ONTAP API](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md)
-- [Samba](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/samba/integrations/samba.md)
+- [Samba](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/samba/integrations/samba.md)
- [Starwind VSAN VSphere Edition](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md)
@@ -1041,7 +1039,7 @@ If you don't see the app/service you'd like to monitor in this list:
- [Site 24x7](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md)
-- [TCP Endpoints](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md)
+- [TCP/UDP Endpoints](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/portcheck/integrations/tcp-udp_endpoints.md)
- [Uptimerobot](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md)
@@ -1081,7 +1079,7 @@ If you don't see the app/service you'd like to monitor in this list:
### UPS
-- [APC UPS](https://github.com/netdata/netdata/blob/master/src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md)
+- [APC UPS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/apcupsd/integrations/apc_ups.md)
- [Eaton UPS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md)
@@ -1133,6 +1131,8 @@ If you don't see the app/service you'd like to monitor in this list:
- [NGINX Plus](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md)
+- [NGINX Unit](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxunit/integrations/nginx_unit.md)
+
- [NGINX VTS](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md)
- [NGINX](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/nginx/integrations/nginx.md)
@@ -1149,7 +1149,7 @@ If you don't see the app/service you'd like to monitor in this list:
- [Traefik](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/traefik/integrations/traefik.md)
-- [Varnish](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/varnish/integrations/varnish.md)
+- [Varnish](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/varnish/integrations/varnish.md)
- [Web server log files](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md)
@@ -1165,6 +1165,12 @@ If you don't see the app/service you'd like to monitor in this list:
- [MS SQL Server](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md)
+- [Memory statistics](https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/integrations/memory_statistics.md)
+
- [NET Framework](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/integrations/net_framework.md)
+- [System statistics](https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/integrations/system_statistics.md)
+
+- [System thermal zone](https://github.com/netdata/netdata/blob/master/src/collectors/windows.plugin/integrations/system_thermal_zone.md)
+
- [Windows](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/windows/integrations/windows.md)
diff --git a/src/collectors/README.md b/src/collectors/README.md
index 0fd5983b7..e7b9c1552 100644
--- a/src/collectors/README.md
+++ b/src/collectors/README.md
@@ -1,62 +1,58 @@
# Collectors
-When Netdata starts, and with zero configuration, it auto-detects thousands of data sources and immediately collects
-per-second metrics.
+Netdata automatically collects per-second metrics from thousands of data sources without any configuration:
-Netdata can immediately collect metrics from these endpoints thanks to 300+ **collectors**, which all come pre-installed
-when you [install Netdata](/packaging/installer/README.md).
+- **Zero-touch setup**: All collectors are pre-installed, allowing you to start collecting detailed metrics right after Netdata starts.
+- **Universal Monitoring**: Monitor virtually anything with Netdata's extensive collector library.
-All collectors are **installed by default** with every installation of Netdata. You do not need to install
-collectors manually to collect metrics from new sources.
-See how you can [monitor anything with Netdata](/src/collectors/COLLECTORS.md).
+If you don't see charts for your application, check our collectors' [configuration reference](/src/collectors/REFERENCE.md) to ensure both the collector and your application are properly configured.
-Upon startup, Netdata will **auto-detect** any application or service that has a collector, as long as both the collector
-and the app/service are configured correctly. If you don't see charts for your application, see
-our [collectors' configuration reference](/src/collectors/REFERENCE.md).
+## Collector Types
-## How Netdata's metrics collectors work
+Netdata's collectors are specialized data collection plugins that gather metrics from various sources. They are divided into two main categories:
-Every collector has two primary jobs:
+| Type | Description | Key Features |
+|----------|-----------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Internal | Native collectors that gather system-level metrics | • Written in `C` for optimal performance<br/>• Run as threads within Netdata daemon<br/>• Zero external dependencies<br/>• Minimal system overhead |
+| External | Modular collectors that gather metrics from applications and services | • Support multiple programming languages<br/>• Run as independent processes<br/>• Communicate via pipes with Netdata<br/>• Managed by [plugins.d](/src/plugins.d/README.md)<br/>• Examples: MySQL, Nginx, Redis collectors |
-- Look for exposed metrics at a pre- or user-defined endpoint.
-- Gather exposed metrics and use additional logic to build meaningful, interactive visualizations.
-If the collector finds compatible metrics exposed on the configured endpoint, it begins a per-second collection job. The
-Netdata Agent gathers these metrics, sends them to the
-[database engine for storage](/docs/netdata-agent/configuration/optimizing-metrics-database/change-metrics-storage.md)
-, and immediately
-[visualizes them meaningfully](/docs/dashboards-and-charts/netdata-charts.md)
-on dashboards.
+## Collector Privileges
-Each collector comes with a pre-defined configuration that matches the default setup for that application. This endpoint
-can be a URL and port, a socket, a file, a web page, and more. The endpoint is user-configurable, as are many other
-specifics of what a given collector does.
+Netdata uses various plugins and helper binaries that require elevated privileges to collect system metrics.
+This section outlines the required privileges and how they are configured in different environments.
-## Collector architecture and terminology
+### Privileges
-- **Collectors** are the processes/programs that actually gather metrics from various sources.
+| Plugin/Binary | Privileges (Linux) | Privileges (Non-Linux or Containerized Environment) |
+|------------------------|-------------------------------------------------|-----------------------------------------------------|
+| apps.plugin | CAP_DAC_READ_SEARCH, CAP_SYS_PTRACE | setuid root |
+| debugfs.plugin | CAP_DAC_READ_SEARCH | setuid root |
+| systemd-journal.plugin | CAP_DAC_READ_SEARCH | setuid root |
+| perf.plugin | CAP_PERFMON | setuid root |
+| slabinfo.plugin | CAP_DAC_READ_SEARCH | setuid root |
+| go.d.plugin | CAP_DAC_READ_SEARCH, CAP_NET_ADMIN, CAP_NET_RAW | setuid root |
+| freeipmi.plugin | setuid root | setuid root |
+| nfacct.plugin | setuid root | setuid root |
+| xenstat.plugin | setuid root | setuid root |
+| ioping | setuid root | setuid root |
+| ebpf.plugin | setuid root | setuid root |
+| cgroup-network | setuid root | setuid root |
+| local-listeners | setuid root | setuid root |
+| network-viewer.plugin | setuid root | setuid root |
+| ndsudo | setuid root | setuid root |
-- **Plugins** help manage all the independent data collection processes in a variety of programming languages, based on
- their purpose and performance requirements. There are three types of plugins:
+**About ndsudo**:
- - **Internal** plugins organize collectors that gather metrics from `/proc`, `/sys` and other Linux kernel sources.
- They are written in `C`, and run as threads within the Netdata daemon.
+`ndsudo` is a purpose-built privilege escalation utility for Netdata that executes a predefined set of commands with root privileges. Unlike traditional `sudo`, it operates with a [hard-coded list of allowed commands](https://github.com/netdata/netdata/blob/master/src/collectors/utils/ndsudo.c), providing better security through reduced scope and eliminating the need for `sudo` configuration.
- - **External** plugins organize collectors that gather metrics from external processes, such as a MySQL database or
- Nginx web server. They can be written in any language, and the `netdata` daemon spawns them as long-running
- independent processes. They communicate with the daemon via pipes. All external plugins are managed by
- [plugins.d](/src/collectors/plugins.d/README.md), which provides additional management options.
+It’s used by the `go.d.plugin` to collect data by executing certain binaries that require root access.
-- **Orchestrators** are external plugins that run and manage one or more modules. They run as independent processes.
- The Go orchestrator is in active development.
+### File Permissions and Ownership
- - [go.d.plugin](/src/go/plugin/go.d/README.md): An orchestrator for data
- collection modules written in `go`.
+To ensure security, all plugin and helper binary files have the following permissions and ownership:
- - [python.d.plugin](/src/collectors/python.d.plugin/README.md):
- An orchestrator for data collection modules written in `python` v2/v3.
+- **Ownership**: `root:netdata`.
+- **Permissions**: `0750` (for non-setuid binaries) or `4750` (for setuid binaries).
- - [charts.d.plugin](/src/collectors/charts.d.plugin/README.md):
- An orchestrator for data collection modules written in`bash` v4+.
-
-- **Modules** are the individual programs controlled by an orchestrator to collect data from a specific application, or type of endpoint.
+This configuration limits access to the files to the `netdata` user and the `root` user, while allowing execution by the `netdata` user.
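The README changes above describe external collectors as independent processes that talk to the Netdata daemon over pipes. As a rough illustration of that model (not part of this commit), a minimal external plugin is just a program that prints the plugins.d text protocol on its stdout; the `example.random` chart and `random` dimension below are made-up names for the sketch:

```bash
#!/usr/bin/env bash
# Minimal sketch of an external collector speaking the plugins.d text protocol.
# Netdata spawns the plugin and reads these lines from its stdout (the pipe).

# Declare a chart and one dimension once, at startup.
echo "CHART example.random '' 'A random number' 'value' example example.random line 99999 1"
echo "DIMENSION random '' absolute 1 1"

# Then emit one BEGIN/SET/END block per collection iteration.
while true; do
    echo "BEGIN example.random"
    echo "SET random = $RANDOM"
    echo "END"
    sleep 1
done
```

Placed as an executable in the plugins directory, a script like this would be managed by plugins.d like any other external collector; see the plugins.d README referenced above for the authoritative protocol description.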
diff --git a/src/collectors/REFERENCE.md b/src/collectors/REFERENCE.md
index e480a16d8..af745013c 100644
--- a/src/collectors/REFERENCE.md
+++ b/src/collectors/REFERENCE.md
@@ -1,32 +1,23 @@
-<!--
-title: "Collectors configuration reference"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/REFERENCE.md"
-sidebar_label: "Collectors configuration"
-learn_status: "Published"
-learn_topic_type: "Tasks"
-learn_rel_path: "Configuration"
--->
-
# Collectors configuration reference
-The list of supported collectors can be found in [the documentation](/src/collectors/COLLECTORS.md),
-and on [our website](https://www.netdata.cloud/integrations). The documentation of each collector provides all the
-necessary configuration options and prerequisites for that collector. In most cases, either the charts are automatically generated
+The list of supported collectors can be found in [the documentation](/src/collectors/COLLECTORS.md),
+and on [our website](https://www.netdata.cloud/integrations). The documentation of each collector provides all the
+necessary configuration options and prerequisites for that collector. In most cases, either the charts are automatically generated
without any configuration, or you just fulfil those prerequisites and [configure the collector](#configure-a-collector).
-If the application you are interested in monitoring is not listed in our integrations, the collectors list includes
-the available options to
+If the application you are interested in monitoring is not listed in our integrations, the collectors list includes
+the available options to
[add your application to Netdata](https://github.com/netdata/netdata/edit/master/src/collectors/COLLECTORS.md#add-your-application-to-netdata).
-If we do support your collector but the charts described in the documentation don't appear on your dashboard, the reason will
+If we do support your collector but the charts described in the documentation don't appear on your dashboard, the reason will
be one of the following:
-- The entire data collection plugin is disabled by default. Read how to [enable and disable plugins](#enable-and-disable-plugins)
+- The entire data collection plugin is disabled by default. Read how to [enable and disable plugins](#enable-and-disable-plugins)
-- The data collection plugin is enabled, but a specific data collection module is disabled. Read how to
- [enable and disable a specific collection module](#enable-and-disable-a-specific-collection-module).
+- The data collection plugin is enabled, but a specific data collection module is disabled. Read how to
+ [enable and disable a specific collection module](#enable-and-disable-a-specific-collection-module).
-- Autodetection failed. Read how to [configure](#configure-a-collector) and [troubleshoot](#troubleshoot-a-collector) a collector.
+- Autodetection failed. Read how to [configure](#configure-a-collector) and [troubleshoot](#troubleshoot-a-collector) a collector.
## Enable and disable plugins
@@ -34,28 +25,28 @@ You can enable or disable individual plugins by opening `netdata.conf` and scrol
This section features a list of Netdata's plugins, with a boolean setting to enable or disable them. The exception is
`statsd.plugin`, which has its own `[statsd]` section. Your `[plugins]` section should look similar to this:
-```conf
+```text
[plugins]
- # timex = yes
- # idlejitter = yes
- # netdata monitoring = yes
- # tc = yes
- # diskspace = yes
- # proc = yes
- # cgroups = yes
- # enable running new plugins = yes
- # check for new plugins every = 60
- # slabinfo = no
- # python.d = yes
- # perf = yes
- # ioping = yes
- # fping = yes
- # nfacct = yes
- # go.d = yes
- # apps = yes
- # ebpf = yes
- # charts.d = yes
- # statsd = yes
+ # timex = yes
+ # idlejitter = yes
+ # netdata monitoring = yes
+ # tc = yes
+ # diskspace = yes
+ # proc = yes
+ # cgroups = yes
+ # enable running new plugins = yes
+ # check for new plugins every = 60
+ # slabinfo = no
+ # python.d = yes
+ # perf = yes
+ # ioping = yes
+ # fping = yes
+ # nfacct = yes
+ # go.d = yes
+ # apps = yes
+ # ebpf = yes
+ # charts.d = yes
+ # statsd = yes
```
By default, most plugins are enabled, so you don't need to enable them explicitly to use their collectors. To enable or
@@ -63,11 +54,11 @@ disable any specific plugin, remove the comment (`#`) and change the boolean set
## Enable and disable a specific collection module
-You can enable/disable of the collection modules supported by `go.d`, `python.d` or `charts.d` individually, using the
-configuration file of that orchestrator. For example, you can change the behavior of the Go orchestrator, or any of its
+You can enable/disable any of the collection modules supported by `go.d`, `python.d` or `charts.d` individually, using the
+configuration file of that orchestrator. For example, you can change the behavior of the Go orchestrator, or any of its
collectors, by editing `go.d.conf`.
-Use `edit-config` from your [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory)
+Use `edit-config` from your [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory)
to open the orchestrator primary configuration file:
```bash
@@ -79,20 +70,19 @@ Within this file, you can either disable the orchestrator entirely (`enabled: ye
enable/disable it with `yes` and `no` settings. Uncomment any line you change to ensure the Netdata daemon reads it on
start.
-After you make your changes, restart the Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
+After you make your changes, restart the Agent with the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system.
## Configure a collector
Most collector modules come with **auto-detection**, configured to work out-of-the-box on popular operating systems with
-the default settings.
+the default settings.
However, there are cases that auto-detection fails. Usually, the reason is that the applications to be monitored do not
allow Netdata to connect. In most of the cases, allowing the user `netdata` from `localhost` to connect and collect
metrics, will automatically enable data collection for the application in question (it will require a Netdata restart).
When Netdata starts up, each collector searches for exposed metrics on the default endpoint established by that service
-or application's standard installation procedure. For example,
+or application's standard installation procedure. For example,
the [Nginx collector](/src/go/plugin/go.d/modules/nginx/README.md) searches at
`http://127.0.0.1/stub_status` for exposed metrics in the correct format. If an Nginx web server is running and exposes
metrics on that endpoint, the collector begins gathering them.
@@ -100,12 +90,12 @@ metrics on that endpoint, the collector begins gathering them.
However, not every node or infrastructure uses standard ports, paths, files, or naming conventions. You may need to
enable or configure a collector to gather all available metrics from your systems, containers, or applications.
-First, [find the collector](/src/collectors/COLLECTORS.md) you want to edit
-and open its documentation. Some software has collectors written in multiple languages. In these cases, you should always
+First, [find the collector](/src/collectors/COLLECTORS.md) you want to edit
+and open its documentation. Some software has collectors written in multiple languages. In these cases, you should always
pick the collector written in Go.
-Use `edit-config` from your
-[Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory)
+Use `edit-config` from your
+[Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory)
to open a collector's configuration file. For example, edit the Nginx collector with the following:
```bash
@@ -117,8 +107,7 @@ according to your needs. In addition, every collector's documentation shows the
configure that collector. Uncomment any line you change to ensure the collector's orchestrator or the Netdata daemon
read it on start.
-After you make your changes, restart the Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
+After you make your changes, restart the Agent with the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system.
## Troubleshoot a collector
@@ -131,7 +120,7 @@ cd /usr/libexec/netdata/plugins.d/
sudo su -s /bin/bash netdata
```
-The next step is based on the collector's orchestrator.
+The next step is based on the collector's orchestrator.
```bash
# Go orchestrator (go.d.plugin)
@@ -145,5 +134,5 @@ The next step is based on the collector's orchestrator.
```
The output from the relevant command will provide valuable troubleshooting information. If you can't figure out how to
-enable the collector using the details from this output, feel free to [join our Discord server](https://discord.com/invite/2mEmfW735j),
+enable the collector using the details from this output, feel free to [join our Discord server](https://discord.com/invite/2mEmfW735j),
to get help from our experts.
diff --git a/src/collectors/all.h b/src/collectors/all.h
index 3b96faa10..6892d131e 100644
--- a/src/collectors/all.h
+++ b/src/collectors/all.h
@@ -55,7 +55,54 @@
#define NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS 1207
#define NETDATA_CHART_PRIO_SYSTEM_PACKETS 7001 // freebsd only
#define NETDATA_CHART_PRIO_WINDOWS_THREADS 8001 // Windows only
+#define NETDATA_CHART_PRIO_WINDOWS_THERMAL_ZONES 8002 // Windows only
+// ----------------------------------------------------------------------------
+// Hyper-V
+
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_CPU_USAGE 20000
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_CPU_USAGE_BY_RUN_CONTEXT 20010
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PHYSICAL 20020
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PHYSICAL_GUEST_VISIBLE 20030
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PRESSURE_CURRENT 20040
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_VID_PHYSICAL_PAGES_ALLOCATED 20050
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_VID_REMOTE_PHYSICAL_PAGES 20060
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_TRAFFIC 20070
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_IPSEC_TRAFFIC 20080
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS 20090
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_BROADCAST_PACKETS 20100
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_MULTICAST_PACKETS 20110
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_DIRECTED_PACKETS 20120
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS_DROPPED 20130
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_BYTES 20140
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_ERRORS 20150
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_OPERATIONS 20160
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VMS_HEALTH 20170
+
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_TRAFFIC 20400
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PACKETS 20410
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_BROADCAST_PACKETS 20420
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_MULTICAST_PACKETS 20430
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_DIRECTED_PACKETS 20440
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PACKETS_FLOODED 20450
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_DROPPED_PACKETS 20460
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_EXTENSIONS_DROPPED_PACKETS 20470
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_LEARNED_MAC_ADDRESSES 20470
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PURGED_MAC_ADDRESSES 20480
+
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_IO_TLB_FLUSH 20600
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_VIRTUAL_TLB_FLUSH_ENTRIES 20610
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_VIRTUAL_TLB_PAGES 20620
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_ADDRESS_SPACE 20630
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_ATTACHED_DEVICES 20640
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_DMA_ERRORS 20650
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_INTERRUPT_ERRORS 20660
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_INTERRUPT_THROTTLE_EVENTS 20670
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEPOSITED_PAGES 20680
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_GPA_SPACE_PAGES 20690
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_SPACE_PAGES 20700
+#define NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_GPA_SPACE_MODIFICATIONS 20710
+// ----------------------------------------------------------------------------
// CPU per core
@@ -76,7 +123,9 @@
#define NETDATA_CHART_PRIO_MEM_SYSTEM_COMMITTED 1030
#define NETDATA_CHART_PRIO_MEM_SWAP 1035
#define NETDATA_CHART_PRIO_MEM_SWAP_CALLS 1037
+#define NETDATA_CHART_PRIO_MEM_SWAP_PAGES 1037 // Windows only
#define NETDATA_CHART_PRIO_MEM_SWAPIO 1038
+#define NETDATA_CHART_PRIO_MEM_SYSTEM_POOL 1039 // Windows only
#define NETDATA_CHART_PRIO_MEM_ZSWAP 1036
#define NETDATA_CHART_PRIO_MEM_ZSWAPIO 1037
#define NETDATA_CHART_PRIO_MEM_ZSWAP_COMPRESS_RATIO 1038
@@ -109,7 +158,9 @@
#define NETDATA_CHART_PRIO_MEM_KSM_COW 1303
#define NETDATA_CHART_PRIO_MEM_BALLOON 1350
#define NETDATA_CHART_PRIO_MEM_NUMA 1400
-#define NETDATA_CHART_PRIO_MEM_NUMA_NODES 1410
+#define NETDATA_CHART_PRIO_MEM_NUMA_NODES_NUMASTAT 1410
+#define NETDATA_CHART_PRIO_MEM_NUMA_NODES_MEMINFO 1411
+#define NETDATA_CHART_PRIO_MEM_NUMA_NODES_ACTIVITY 1412
#define NETDATA_CHART_PRIO_MEM_PAGEFRAG 1450
#define NETDATA_CHART_PRIO_MEM_HW 1500
#define NETDATA_CHART_PRIO_MEM_HW_ECC_CE 1550
@@ -134,6 +185,7 @@
#define NETDATA_CHART_PRIO_DISK_MOPS 2080
#define NETDATA_CHART_PRIO_DISK_IOTIME 2090
#define NETDATA_CHART_PRIO_DISK_LATENCY 2095
+#define NETDATA_CHART_PRIO_DISK_SPLIT 2096
#define NETDATA_CHART_PRIO_BCACHE_CACHE_ALLOC 2120
#define NETDATA_CHART_PRIO_BCACHE_HIT_RATIO 2120
#define NETDATA_CHART_PRIO_BCACHE_RATES 2121
@@ -316,6 +368,7 @@
#define NETDATA_CHART_PRIO_IPV6_TCP_PACKETS 6130
#define NETDATA_CHART_PRIO_IPV6_TCP_SOCKETS 6140
#define NETDATA_CHART_PRIO_IPV6_ICMP_PACKETS 6145
+#define NETDATA_CHART_PRIO_IPV6_ICMP_MESSAGES 6146
#define NETDATA_CHART_PRIO_IPV6_ICMP 6150
#define NETDATA_CHART_PRIO_IPV6_ICMP_REDIR 6155
#define NETDATA_CHART_PRIO_IPV6_ICMP_ERRORS 6160
diff --git a/src/collectors/apps.plugin/README.md b/src/collectors/apps.plugin/README.md
index ced91d8ae..f3822e466 100644
--- a/src/collectors/apps.plugin/README.md
+++ b/src/collectors/apps.plugin/README.md
@@ -1,402 +1,180 @@
-<!--
-title: "Application monitoring (apps.plugin)"
-sidebar_label: "Application monitoring "
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/apps.plugin/README.md"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/System metrics"
--->
+# Applications monitoring (apps.plugin)
-# Application monitoring (apps.plugin)
+`apps.plugin` monitors the resource utilization of all running processes.
-`apps.plugin` breaks down system resource usage to **processes**, **users** and **user groups**.
-It is enabled by default on every Netdata installation.
+## Process Aggregation and Grouping
-To achieve this task, it iterates through the whole process tree, collecting resource usage information
-for every process found running.
+`apps.plugin` aggregates processes in three distinct ways to provide a more insightful breakdown of resource utilization:
-Since Netdata needs to present this information in charts and track them through time,
-instead of presenting a `top` like list, `apps.plugin` uses a pre-defined list of **process groups**
-to which it assigns all running processes. This list is customizable via `apps_groups.conf`, and Netdata
-ships with a good default for most cases (to edit it on your system run `/etc/netdata/edit-config apps_groups.conf`).
+| Grouping | Description |
+|------------|----------------------------------------------------------------------------------------------------------------------------------------------------|
+| App | Grouped by the position in the process tree. This is customizable and allows aggregation by process managers and individual processes of interest. |
+| User | Grouped by the effective user (UID) under which the processes run. |
+| User Group | Grouped by the effective group (GID) under which the processes run. |
-So, `apps.plugin` builds a process tree (much like `ps fax` does in Linux), and groups
-processes together (evaluating both child and parent processes) so that the result is always a list with
-a predefined set of members (of course, only process groups found running are reported).
+## Short-Lived Process Handling
-> If you find that `apps.plugin` categorizes standard applications as `other`, we would be
-> glad to accept pull requests improving the defaults shipped with Netdata in `apps_groups.conf`.
-
-Unlike traditional process monitoring tools (like `top`), `apps.plugin` is able to account the resource
-utilization of exit processes. Their utilization is accounted at their currently running parents.
-So, `apps.plugin` is perfectly able to measure the resources used by shell scripts and other processes
-that fork/spawn other short-lived processes hundreds of times per second.
+`apps.plugin` accurately captures resource utilization for both running and exited processes, ensuring that the impact of short-lived subprocesses is fully accounted for.
+This is particularly valuable for scenarios where processes spawn numerous short-lived subprocesses, such as shell scripts that fork hundreds or thousands of times per second.
+Even though these subprocesses may have a brief lifespan, `apps.plugin` effectively aggregates their resource utilization, providing a comprehensive overview of how resources are shared among all processes within the system.
## Charts
-`apps.plugin` provides charts for 3 sections:
-
-1. Per application charts as **Applications** at Netdata dashboards
-2. Per user charts as **Users** at Netdata dashboards
-3. Per user group charts as **User Groups** at Netdata dashboards
-
-Each of these sections provides the same number of charts:
-
-- CPU utilization (`apps.cpu`)
- - Total CPU usage
- - User/system CPU usage (`apps.cpu_user`/`apps.cpu_system`)
-- Disk I/O
- - Physical reads/writes (`apps.preads`/`apps.pwrites`)
- - Logical reads/writes (`apps.lreads`/`apps.lwrites`)
- - Open unique files (if a file is found open multiple times, it is counted just once, `apps.files`)
-- Memory
- - Real Memory Used (non-shared, `apps.mem`)
- - Virtual Memory Allocated (`apps.vmem`)
- - Minor page faults (i.e. memory activity, `apps.minor_faults`)
-- Processes
- - Threads running (`apps.threads`)
- - Processes running (`apps.processes`)
- - Carried over uptime (since the last Netdata Agent restart, `apps.uptime`)
- - Minimum uptime (`apps.uptime_min`)
- - Average uptime (`apps.uptime_average`)
- - Maximum uptime (`apps.uptime_max`)
- - Pipes open (`apps.pipes`)
-- Swap memory
- - Swap memory used (`apps.swap`)
- - Major page faults (i.e. swap activity, `apps.major_faults`)
-- Network
- - Sockets open (`apps.sockets`)
-
+`apps.plugin` offers a set of charts for three groups within the **System->Processes** section of the Netdata dashboard: **Apps**, **Users**, and **Groups**.
+
+Each of these sections presents the same number of charts:
+
+- CPU utilization
+ - Total CPU usage
+ - User/system CPU usage
+- Memory
+ - Real Memory Used (non-shared)
+ - Virtual Memory Allocated
+ - Minor page faults (i.e. memory activity)
+- Swap memory
+ - Swap memory used
+ - Major page faults (i.e. swap activity)
+- Disk
+ - Physical reads/writes
+ - Logical reads/writes
+- Tasks
+ - Threads
+ - Processes
+- FDs
+ - Open file descriptors limit %
+ - Open file descriptors
+- Uptime
+ - Carried over uptime (since the last Netdata Agent restart)
+
In addition, if the [eBPF collector](/src/collectors/ebpf.plugin/README.md) is running, your dashboard will also show an
additional [list of charts](/src/collectors/ebpf.plugin/README.md#integration-with-appsplugin) using low-level Linux
metrics.
-The above are reported:
-
-- For **Applications** per target configured.
-- For **Users** per username or UID (when the username is not available).
-- For **User Groups** per group name or GID (when group name is not available).
-
## Performance
-`apps.plugin` is a complex piece of software and has a lot of work to do
-We are proud that `apps.plugin` is a lot faster compared to any other similar tool,
-while collecting a lot more information for the processes, however the fact is that
-this plugin requires more CPU resources than the `netdata` daemon itself.
+`apps.plugin` is designed to be highly efficient, collecting significantly more process information than other similar tools while maintaining exceptional speed.
+However, due to its comprehensive approach of traversing the entire process tree on each iteration, its resource usage may become noticeable, especially on systems with a large number of processes.
-Under Linux, for each process running, `apps.plugin` reads several `/proc` files
-per process. Doing this work per-second, especially on hosts with several thousands
-of processes, may increase the CPU resources consumed by the plugin.
+Under Linux, `apps.plugin` reads multiple `/proc` files for each running process, performing this operation on a per-second basis.
+This can lead to increased CPU consumption on hosts with several thousands of processes.
-In such cases, you many need to lower its data collection frequency.
+In such cases, you may need to adjust the data collection frequency to reduce the plugin's resource usage.
To do this, edit `/etc/netdata/netdata.conf` and find this section:
-```
+```text
[plugin:apps]
- # update every = 1
- # command options =
+ # update every = 1
+ # command options =
```
-Uncomment the line `update every` and set it to a higher number. If you just set it to `2`,
-its CPU resources will be cut in half, and data collection will be once every 2 seconds.
+Uncomment the `update every` line and set it to a higher value.
+For example, setting it to 2 will halve the plugin's CPU usage and collect data once every 2 seconds.
## Configuration
-The configuration file is `/etc/netdata/apps_groups.conf`. To edit it on your system, run `/etc/netdata/edit-config apps_groups.conf`.
-
-The configuration file works accepts multiple lines, each having this format:
-
-```txt
-group: process1 process2 ...
-```
-
-Each group can be given multiple times, to add more processes to it.
-
-For the **Applications** section, only groups configured in this file are reported.
-All other processes will be reported as `other`.
-
-For each process given, its whole process tree will be grouped, not just the process matched.
-The plugin will include both parents and children. If including the parents into the group is
-undesirable, the line `other: *` should be appended to the `apps_groups.conf`.
-
-The process names are the ones returned by:
-
-- `ps -e` or `cat /proc/PID/stat`
-- in case of substring mode (see below): `/proc/PID/cmdline`
-
-To add process names with spaces, enclose them in quotes (single or double)
-example: `'Plex Media Serv'` or `"my other process"`.
-
-You can add an asterisk `*` at the beginning and/or the end of a process:
-
-- `*name` _suffix_ mode: will search for processes ending with `name` (at `/proc/PID/stat`)
-- `name*` _prefix_ mode: will search for processes beginning with `name` (at `/proc/PID/stat`)
-- `*name*` _substring_ mode: will search for `name` in the whole command line (at `/proc/PID/cmdline`)
-
-If you enter even just one _name_ (substring), `apps.plugin` will process
-`/proc/PID/cmdline` for all processes (of course only once per process: when they are first seen).
-
-To add processes with single quotes, enclose them in double quotes: `"process with this ' single quote"`
-
-To add processes with double quotes, enclose them in single quotes: `'process with this " double quote'`
-
-If a group or process name starts with a `-`, the dimension will be hidden from the chart (cpu chart only).
-
-If a process starts with a `+`, debugging will be enabled for it (debugging produces a lot of output - do not enable it in production systems).
-
-You can add any number of groups. Only the ones found running will affect the charts generated.
-However, producing charts with hundreds of dimensions may slow down your web browser.
-
-The order of the entries in this list is important: the first that matches a process is used, so put important
-ones at the top. Processes not matched by any row, will inherit it from their parents or children.
-
-The order also controls the order of the dimensions on the generated charts (although applications started
-after apps.plugin is started, will be appended to the existing list of dimensions the `netdata` daemon maintains).
-
-There are a few command line options you can pass to `apps.plugin`. The list of available options can be acquired with the `--help` flag. The options can be set in the `netdata.conf` file. For example, to disable user and user group charts you should set
-
-```
-[plugin:apps]
- command options = without-users without-groups
-```
-
-### Integration with eBPF
-
-If you don't see charts under the **eBPF syscall** or **eBPF net** sections, you should edit your
-[`ebpf.d.conf`](/src/collectors/ebpf.plugin/README.md#configure-the-ebpf-collector) file to ensure the eBPF program is enabled.
-
-Also see our [guide on troubleshooting apps with eBPF
-metrics](/docs/developer-and-contributor-corner/monitor-debug-applications-ebpf.md) for ideas on how to interpret these charts in a
-few scenarios.
-
-## Permissions
-
-`apps.plugin` requires additional privileges to collect all the information it needs.
-The problem is described in issue #157.
-
-When Netdata is installed, `apps.plugin` is given the capabilities `cap_dac_read_search,cap_sys_ptrace+ep`.
-If this fails (i.e. `setcap` fails), `apps.plugin` is setuid to `root`.
+The configuration file is `/etc/netdata/apps_groups.conf`. You can edit this
+file using our [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script.
-### linux capabilities in containers
+### Configuring process managers
-There are a few cases, like `docker` and `virtuozzo` containers, where `setcap` succeeds, but the capabilities
-are silently ignored (in `lxc` containers `setcap` fails).
+`apps.plugin` needs to know the common process managers, which are the processes that spawn other processes.
+These process managers allow `apps.plugin` to automatically include their subprocesses in monitoring, ensuring that important processes are not overlooked.
-In this case, you will have to setuid to root `apps.plugin` by running these commands:
+- Process managers are configured in the `apps_groups.conf` file using the `managers:` prefix, as follows:
-```sh
-chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
-chmod 4750 /usr/libexec/netdata/plugins.d/apps.plugin
-```
+ ```text
+ managers: process1 process2 process3
+ ```
-You will have to run these, every time you update Netdata.
-
-## Security
+- Multiple lines can be used to define additional process managers, all starting with `managers:`.
-`apps.plugin` performs a hard-coded function of building the process tree in memory,
-iterating forever, collecting metrics for each running process and sending them to Netdata.
-This is a one-way communication, from `apps.plugin` to Netdata.
+- If you want to clear all existing process managers, you can use the line `managers: clear`. This will remove all previously configured managers, allowing you to provide a new list.
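+As a quick sketch (the manager names below are illustrative assumptions, not the shipped defaults), a customized list could look like this:
+```text
+# illustrative example - replace with the process managers actually used on your system
+managers: clear
+managers: systemd init containerd-shim-runc-v2
+```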
-So, since `apps.plugin` cannot be instructed by Netdata for the actions it performs,
-we think it is pretty safe to allow it to have these increased privileges.
+### Configuring interpreters
-Keep in mind that `apps.plugin` will still run without escalated permissions,
-but it will not be able to collect all the information.
+Interpreted languages like `python`, `bash`, `sh`, `node`, and others may obfuscate the actual name of a process.
-## Application Badges
+To address this, `apps.plugin` allows you to configure interpreters and specify that the actual process name can be found in one of the command-line parameters of the interpreter.
+When a process matches a configured interpreter, `apps.plugin` will examine all the parameters of the interpreter and locate the first parameter that is an absolute filename existing on disk. If such a filename is found, `apps.plugin` will name the process using the name of that filename.
-You can create badges that you can embed anywhere you like, with URLs like this:
+- Interpreters are configured in the `apps_groups.conf` file using the `interpreters:` prefix, as follows:
+  ```text
+  interpreters: process1 process2 process3
+  ```
-https://your.netdata.ip:19999/api/v1/badge.svg?chart=apps.processes&dimensions=myapp&value_color=green%3E0%7Cred
-```
-
-The color expression unescaped is this: `value_color=green>0|red`.
-
-Here is an example for the process group `sql` at `https://registry.my-netdata.io`:
-![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
+- Multiple lines can be used to define additional interpreters, all starting with `interpreters:`.
-Netdata is able to give you a lot more badges for your app.
-Examples below for process group `sql`:
+- If you want to clear all existing process interpreters, you can use the line `interpreters: clear`. This will remove all previously configured interpreters, allowing you to provide a new list.
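+As a sketch (assuming these interpreters are not already covered by the shipped defaults), the list could be extended like this:
+```text
+# illustrative example - adjust to the interpreters you actually run
+interpreters: python python3 bash sh node
+```
+With this in place, a process started as `python3 /opt/jobs/cleanup.py` (a hypothetical script path) would be named `cleanup.py` instead of `python3`.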
-- CPU usage: ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.cpu&dimensions=sql&value_color=green=0%7Corange%3C50%7Cred)
-- Disk Physical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.preads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Disk Physical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.pwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Disk Logical Reads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lreads&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Disk Logical Writes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.lwrites&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Open Files ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_files&dimensions=sql&value_color=green%3E30%7Cred)
-- Real Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.mem&dimensions=sql&value_color=green%3C100%7Corange%3C200%7Cred)
-- Virtual Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.vmem&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Swap Memory ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.swap&dimensions=sql&value_color=green=0%7Cred)
-- Minor Page Faults ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.minor_faults&dimensions=sql&value_color=green%3C100%7Corange%3C1000%7Cred)
-- Processes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.processes&dimensions=sql&value_color=green%3E0%7Cred)
-- Threads ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.threads&dimensions=sql&value_color=green%3E=28%7Cred)
-- Major Faults (swap activity) ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.major_faults&dimensions=sql&value_color=green=0%7Cred)
-- Open Pipes ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_pipes&dimensions=sql&value_color=green=0%7Cred)
-- Open Sockets ![image](https://registry.my-netdata.io/api/v1/badge.svg?chart=apps.fds_sockets&dimensions=sql&value_color=green%3E=3%7Cred)
+### Configuring process groups and renaming processes
-For more information about badges check [Generating Badges](/src/web/api/badges/README.md)
+- The configuration file supports multiple lines, each following this format:
-## Comparison with console tools
+ ```text
+ group: process1 process2 ...
+ ```
-SSH to a server running Netdata and execute this:
-
-```sh
-while true; do ls -l /var/run >/dev/null; done
-```
+- You can define a group multiple times to include additional processes within it.
-In most systems `/var/run` is a `tmpfs` device, so there is nothing that can stop this command
-from consuming entirely one of the CPU cores of the machine.
+- For each process specified, all of its subprocesses will be automatically grouped, not just the matched process itself.
-As we will see below, **none** of the console performance monitoring tools can report that this
-command is using 100% CPU. They do report of course that the CPU is busy, but **they fail to
-identify the process that consumes so much CPU**.
+### Matching processes
-Here is what common Linux console monitoring tools report:
+The following methods are used for matching against the specified patterns:
-### top
-
-`top` reports that `bash` is using just 14%.
-
-If you check the total system CPU utilization, it says there is no idle CPU at all, but `top`
-fails to provide a breakdown of the CPU consumption in the system. The sum of the CPU utilization
-of all processes reported by `top`, is 15.6%.
-
-```
-top - 18:46:28 up 3 days, 20:14, 2 users, load average: 0.22, 0.05, 0.02
-Tasks: 76 total, 2 running, 74 sleeping, 0 stopped, 0 zombie
-%Cpu(s): 32.8 us, 65.6 sy, 0.0 ni, 0.0 id, 0.0 wa, 1.3 hi, 0.3 si, 0.0 st
-KiB Mem : 1016576 total, 244112 free, 52012 used, 720452 buff/cache
-KiB Swap: 0 total, 0 free, 0 used. 753712 avail Mem
-
- PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
-12789 root 20 0 14980 4180 3020 S 14.0 0.4 0:02.82 bash
- 9 root 20 0 0 0 0 S 1.0 0.0 0:22.36 rcuos/0
- 642 netdata 20 0 132024 20112 2660 S 0.3 2.0 14:26.29 netdata
-12522 netdata 20 0 9508 2476 1828 S 0.3 0.2 0:02.26 apps.plugin
- 1 root 20 0 67196 10216 7500 S 0.0 1.0 0:04.83 systemd
- 2 root 20 0 0 0 0 S 0.0 0.0 0:00.00 kthreadd
-```
-
-### htop
-
-Exactly like `top`, `htop` is providing an incomplete breakdown of the system CPU utilization.
-
-```
- CPU[||||||||||||||||||||||||100.0%] Tasks: 27, 11 thr; 2 running
- Mem[||||||||||||||||||||85.4M/993M] Load average: 1.16 0.88 0.90
- Swp[ 0K/0K] Uptime: 3 days, 21:37:03
-
- PID USER PRI NI VIRT RES SHR S CPU% MEM% TIME+ Command
-12789 root 20 0 15104 4484 3208 S 14.0 0.4 10:57.15 -bash
- 7024 netdata 20 0 9544 2480 1744 S 0.7 0.2 0:00.88 /usr/libexec/netd
- 7009 netdata 20 0 138M 21016 2712 S 0.7 2.1 0:00.89 /usr/sbin/netdata
- 7012 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.31 /usr/sbin/netdata
- 563 root 20 0 308M 202M 202M S 0.0 20.4 1:00.81 /usr/lib/systemd/
- 7019 netdata 20 0 138M 21016 2712 S 0.0 2.1 0:00.14 /usr/sbin/netdata
-```
+| Method | Description |
+|---------|----------------------------------------------------------------------|
+| comm | Process name as reported by `ps -e` or `cat /proc/{PID}/comm` |
+| cmdline | The complete command line (`cat /proc/{PID}/cmdline \| tr '\0' ' '`) |
-### atop
+> On Linux, the **comm** field is limited to 15 characters.
+> `apps.plugin` attempts to obtain the full process name by searching for it in the **cmdline**.
+> If successful, the entire process name is used; otherwise, the shortened version is used.
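+For example, a hypothetical daemon started as `/usr/bin/my-long-daemon-name --workers 4` would be seen as:
+```text
+comm:    my-long-daemon-                            <- truncated to 15 characters
+cmdline: /usr/bin/my-long-daemon-name --workers 4   <- full command line
+```
+Because the truncated **comm** can be located inside the **cmdline**, `apps.plugin` restores the full name `my-long-daemon-name`.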
-`atop` also fails to break down CPU usage.
+You can use asterisks (`*`) to create patterns:
-```
-ATOP - localhost 2016/12/10 20:11:27 ----------- 10s elapsed
-PRC | sys 1.13s | user 0.43s | #proc 75 | #zombie 0 | #exit 5383 |
-CPU | sys 67% | user 31% | irq 2% | idle 0% | wait 0% |
-CPL | avg1 1.34 | avg5 1.05 | avg15 0.96 | csw 51346 | intr 10508 |
-MEM | tot 992.8M | free 211.5M | cache 470.0M | buff 87.2M | slab 164.7M |
-SWP | tot 0.0M | free 0.0M | | vmcom 207.6M | vmlim 496.4M |
-DSK | vda | busy 0% | read 0 | write 4 | avio 1.50 ms |
-NET | transport | tcpi 16 | tcpo 15 | udpi 0 | udpo 0 |
-NET | network | ipi 16 | ipo 15 | ipfrw 0 | deliv 16 |
-NET | eth0 ---- | pcki 16 | pcko 15 | si 1 Kbps | so 4 Kbps |
-
- PID SYSCPU USRCPU VGROW RGROW RDDSK WRDSK ST EXC S CPU CMD 1/600
-12789 0.98s 0.40s 0K 0K 0K 336K -- - S 14% bash
- 9 0.08s 0.00s 0K 0K 0K 0K -- - S 1% rcuos/0
- 7024 0.03s 0.00s 0K 0K 0K 0K -- - S 0% apps.plugin
- 7009 0.01s 0.01s 0K 0K 0K 4K -- - S 0% netdata
-```
+| Mode | Pattern | Description |
+|-----------|----------|------------------------------------------|
+| prefix | `name*` | Matches a **comm** that begins with name |
+| suffix | `*name` | Matches a **comm** that ends with name |
+| substring | `*name*` | Searches for name within the **cmdline** |
-### glances
+- Asterisks can be placed anywhere within the name (e.g., `na*me`) without changing which field is matched (**comm** or **cmdline**).
+- To include process names with spaces, enclose them in quotes (single or double), like this: `'Plex Media Serv'` or `"my other process"`.
+- To include processes with single quotes, enclose them in double quotes: `"process with this ' single quote"`.
+- To include processes with double quotes, enclose them in single quotes: `'process with this " double quote'`.
+- The order of the entries in the configuration list is crucial. The first matching entry will be used, so it's important to follow a top-down hierarchy. Processes that don't match any entry will inherit the group from their parent processes.
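+Putting these rules together, a minimal sketch of custom `apps_groups.conf` entries (group and process names here are hypothetical):
+```text
+# illustrative example - the first matching entry wins, so order matters
+webservers: nginx* apache* httpd
+databases: mysqld* postgres* "mongod"
+myjobs: *cleanup.py*
+```
+Here `nginx*` is a prefix match on **comm**, while `*cleanup.py*` switches to substring mode and is searched for within the full **cmdline**.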
-And the same is true for `glances`. The system runs at 100%, but `glances` reports only 17%
-per process utilization.
+There are a few command line options you can pass to `apps.plugin`. The list of available options can be acquired with the `--help` flag.
+The options can be set in `netdata.conf` using the [`edit-config` script](/docs/netdata-agent/configuration/README.md).
-Note also, that being a `python` program, `glances` uses 1.6% CPU while it runs.
+For example, to disable user and user group charts you would set:
+```text
+[plugin:apps]
+ command options = without-users without-groups
```
-localhost Uptime: 3 days, 21:42:00
-
-CPU [100.0%] CPU 100.0% MEM 23.7% SWAP 0.0% LOAD 1-core
-MEM [ 23.7%] user: 30.9% total: 993M total: 0 1 min: 1.18
-SWAP [ 0.0%] system: 67.8% used: 236M used: 0 5 min: 1.08
- idle: 0.0% free: 757M free: 0 15 min: 1.00
-
-NETWORK Rx/s Tx/s TASKS 75 (90 thr), 1 run, 74 slp, 0 oth
-eth0 168b 2Kb
-eth1 0b 0b CPU% MEM% PID USER NI S Command
-lo 0b 0b 13.5 0.4 12789 root 0 S -bash
- 1.6 2.2 7025 root 0 R /usr/bin/python /u
-DISK I/O R/s W/s 1.0 0.0 9 root 0 S rcuos/0
-vda1 0 4K 0.3 0.2 7024 netdata 0 S /usr/libexec/netda
- 0.3 0.0 7 root 0 S rcu_sched
-FILE SYS Used Total 0.3 2.1 7009 netdata 0 S /usr/sbin/netdata
-/ (vda1) 1.56G 29.5G 0.0 0.0 17 root 0 S oom_reaper
-```
-
-### why does this happen?
-
-All the console tools report usage based on the processes found running *at the moment they
-examine the process tree*. So, they see just one `ls` command, which is actually very quick
-with minor CPU utilization. But the shell, is spawning hundreds of them, one after another
-(much like shell scripts do).
-
-### What does Netdata report?
-
-The total CPU utilization of the system:
-![image](https://cloud.githubusercontent.com/assets/2662304/21076212/9198e5a6-bf2e-11e6-9bc0-6bdea25befb2.png)
-<br/>***Figure 1**: The system overview section at Netdata, just a few seconds after the command was run*
-
-And at the applications `apps.plugin` breaks down CPU usage per application:
-
-![image](https://cloud.githubusercontent.com/assets/2662304/21076220/c9687848-bf2e-11e6-8d81-348592c5aca2.png)
-<br/>***Figure 2**: The Applications section at Netdata, just a few seconds after the command was run*
-
-So, the `ssh` session is using 95% CPU time.
-
-Why `ssh`?
+### Integration with eBPF
-`apps.plugin` groups all processes based on its configuration file.
-The default configuration has nothing for `bash`, but it has for `sshd`, so Netdata accumulates
-all ssh sessions to a dimension on the charts, called `ssh`. This includes all the processes in
-the process tree of `sshd`, **including the exited children**.
+If you don't see charts under the **eBPF syscall** or **eBPF net** sections, you should edit your
+[`ebpf.d.conf`](/src/collectors/ebpf.plugin/README.md#configure-the-ebpf-collector) file to ensure the eBPF program is enabled.
-> Distributions based on `systemd`, provide another way to get cpu utilization per user session
-> or service running: control groups, or cgroups, commonly used as part of containers
-> `apps.plugin` does not use these mechanisms. The process grouping made by `apps.plugin` works
-> on any Linux, `systemd` based or not.
+Also see our [guide on troubleshooting apps with eBPF metrics](/docs/developer-and-contributor-corner/monitor-debug-applications-ebpf.md) for ideas on how to interpret these charts in a few scenarios.
-#### a more technical description of how Netdata works
+## Permissions
-Netdata reads `/proc/<pid>/stat` for all processes, once per second and extracts `utime` and
-`stime` (user and system cpu utilization), much like all the console tools do.
+`apps.plugin` requires additional privileges to collect all the necessary information.
-But it also extracts `cutime` and `cstime` that account the user and system time of the exit children of each process.
-By keeping a map in memory of the whole process tree, it is capable of assigning the right time to every process, taking
-into account all its exited children.
+During Netdata installation, `apps.plugin` is granted the `cap_dac_read_search` and `cap_sys_ptrace+ep` capabilities.
+If this fails (i.e., `setcap` fails), `apps.plugin` is setuid to `root`.
-It is tricky, since a process may be running for 1 hour and once it exits, its parent should not
-receive the whole 1 hour of cpu time in just 1 second - you have to subtract the cpu time that has
-been reported for it prior to this iteration.
+## Security
-It is even trickier, because walking through the entire process tree takes some time itself. So,
-if you sum the CPU utilization of all processes, you might have more CPU time than the reported
-total cpu time of the system. Netdata solves this, by adapting the per process cpu utilization to
-the total of the system. [Netdata adds charts that document this normalization](https://london.my-netdata.io/default.html#menu_netdata_submenu_apps_plugin).
+`apps.plugin` operates on a one-way communication model, sending metrics to Netdata without receiving instructions. This design minimizes potential security risks.
+Although `apps.plugin` can function without escalated privileges, it may not be able to collect all the necessary information. To ensure comprehensive data collection, it's recommended to grant the required privileges.
+The increased privileges are primarily used for building the process tree in memory, iterating over running processes, collecting metrics, and sending them to Netdata. This process does not involve any external communication or user interaction, further reducing security concerns.
diff --git a/src/collectors/apps.plugin/apps_aggregations.c b/src/collectors/apps.plugin/apps_aggregations.c
new file mode 100644
index 000000000..d8846d6e7
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_aggregations.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+// ----------------------------------------------------------------------------
+// update statistics on the targets
+
+static size_t zero_all_targets(struct target *root) {
+ struct target *w;
+ size_t count = 0;
+
+ for (w = root; w ; w = w->next) {
+ count++;
+
+ for(size_t f = 0; f < PDF_MAX ;f++)
+ w->values[f] = 0;
+
+ w->uptime_min = 0;
+ w->uptime_max = 0;
+
+#if (PROCESSES_HAVE_FDS == 1)
+ // zero file counters
+ if(w->target_fds) {
+ memset(w->target_fds, 0, sizeof(int) * w->target_fds_size);
+ w->openfds.files = 0;
+ w->openfds.pipes = 0;
+ w->openfds.sockets = 0;
+ w->openfds.inotifies = 0;
+ w->openfds.eventfds = 0;
+ w->openfds.timerfds = 0;
+ w->openfds.signalfds = 0;
+ w->openfds.eventpolls = 0;
+ w->openfds.other = 0;
+
+ w->max_open_files_percent = 0.0;
+ }
+#endif
+
+ if(unlikely(w->root_pid)) {
+ struct pid_on_target *pid_on_target = w->root_pid;
+
+ while(pid_on_target) {
+ struct pid_on_target *pid_on_target_to_free = pid_on_target;
+ pid_on_target = pid_on_target->next;
+ freez(pid_on_target_to_free);
+ }
+
+ w->root_pid = NULL;
+ }
+ }
+
+ return count;
+}
+
+static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o __maybe_unused) {
+ if(unlikely(!p->updated)) {
+ // the process is not running
+ return;
+ }
+
+ if(unlikely(!w)) {
+ netdata_log_error("pid %d %s was left without a target!", p->pid, pid_stat_comm(p));
+ return;
+ }
+
+#if (PROCESSES_HAVE_FDS == 1) && (PROCESSES_HAVE_PID_LIMITS == 1)
+ if(p->openfds_limits_percent > w->max_open_files_percent)
+ w->max_open_files_percent = p->openfds_limits_percent;
+#endif
+
+ for(size_t f = 0; f < PDF_MAX ;f++)
+ w->values[f] += p->values[f];
+
+ if(!w->uptime_min || p->values[PDF_UPTIME] < w->uptime_min) w->uptime_min = p->values[PDF_UPTIME];
+ if(!w->uptime_max || w->uptime_max < p->values[PDF_UPTIME]) w->uptime_max = p->values[PDF_UPTIME];
+
+ if(unlikely(debug_enabled)) {
+ struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target));
+ pid_on_target->pid = p->pid;
+ pid_on_target->next = w->root_pid;
+ w->root_pid = pid_on_target;
+ }
+}
+
+static inline void cleanup_exited_pids(void) {
+ struct pid_stat *p = NULL;
+
+ for(p = root_of_pids(); p ;) {
+ if(!p->updated && (!p->keep || p->keeploops > 0)) {
+ if(unlikely(debug_enabled && (p->keep || p->keeploops)))
+ debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, pid_stat_comm(p));
+
+#if (PROCESSES_HAVE_FDS == 1)
+ for(size_t c = 0; c < p->fds_size; c++)
+ if(p->fds[c].fd > 0) {
+ file_descriptor_not_used(p->fds[c].fd);
+ clear_pid_fd(&p->fds[c]);
+ }
+#endif
+
+ const pid_t r = p->pid;
+ p = p->next;
+ del_pid_entry(r);
+ }
+ else {
+ if(unlikely(p->keep)) p->keeploops++;
+ p->keep = false;
+ p = p->next;
+ }
+ }
+}
+
+static struct target *get_apps_groups_target_for_pid(struct pid_stat *p) {
+ targets_assignment_counter++;
+
+ for(struct target *w = apps_groups_root_target; w ; w = w->next) {
+ if(w->type != TARGET_TYPE_APP_GROUP) continue;
+
+ if(pid_match_check(p, &w->match)) {
+ if(p->is_manager)
+ return NULL;
+
+ p->matched_by_config = true;
+ return w->target ? w->target : w;
+ }
+ }
+
+ return NULL;
+}
+
+static void assign_a_target_to_all_processes(void) {
+ // assign targets from app_groups.conf
+ for(struct pid_stat *p = root_of_pids(); p ; p = p->next) {
+ if(!p->target)
+ p->target = get_apps_groups_target_for_pid(p);
+ }
+
+ // assign targets from their parents, if they have
+ for(struct pid_stat *p = root_of_pids(); p ; p = p->next) {
+ if(!p->target) {
+ if(!p->is_manager) {
+ for (struct pid_stat *pp = p->parent; pp; pp = pp->parent) {
+ if(pp->is_manager) break;
+
+ if (pp->target) {
+ p->target = pp->target;
+ break;
+ }
+ }
+ }
+
+ if(!p->target) {
+ // there is no target, get it from the tree
+ p->target = get_tree_target(p);
+ }
+ }
+
+ fatal_assert(p->target != NULL);
+ }
+}
+
+void aggregate_processes_to_targets(void) {
+ assign_a_target_to_all_processes();
+ apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
+
+#if (PROCESSES_HAVE_UID == 1)
+ zero_all_targets(users_root_target);
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ zero_all_targets(groups_root_target);
+#endif
+#if (PROCESSES_HAVE_SID == 1)
+ zero_all_targets(sids_root_target);
+#endif
+
+ // this has to be done, before the cleanup
+ struct target *w = NULL, *o = NULL;
+ (void)w; (void)o;
+
+ // concentrate everything on the targets
+ for(struct pid_stat *p = root_of_pids(); p ; p = p->next) {
+
+ // --------------------------------------------------------------------
+ // apps_groups and tree target
+
+ aggregate_pid_on_target(p->target, p, NULL);
+
+
+ // --------------------------------------------------------------------
+ // user target
+
+#if (PROCESSES_HAVE_UID == 1)
+ update_cached_host_users();
+
+ o = p->uid_target;
+ if(likely(p->uid_target && p->uid_target->uid == p->uid))
+ w = p->uid_target;
+ else {
+ if(unlikely(debug_enabled && p->uid_target))
+ debug_log("pid %d (%s) switched user from %u (%s) to %u.", p->pid, pid_stat_comm(p), p->uid_target->uid, p->uid_target->name, p->uid);
+
+ w = p->uid_target = get_uid_target(p->uid);
+ }
+
+ aggregate_pid_on_target(w, p, o);
+#endif
+
+ // --------------------------------------------------------------------
+ // user group target
+
+#if (PROCESSES_HAVE_GID == 1)
+ update_cached_host_users();
+
+ o = p->gid_target;
+ if(likely(p->gid_target && p->gid_target->gid == p->gid))
+ w = p->gid_target;
+ else {
+ if(unlikely(debug_enabled && p->gid_target))
+ debug_log("pid %d (%s) switched group from %u (%s) to %u.", p->pid, pid_stat_comm(p), p->gid_target->gid, p->gid_target->name, p->gid);
+
+ w = p->gid_target = get_gid_target(p->gid);
+ }
+
+ aggregate_pid_on_target(w, p, o);
+#endif
+
+ // --------------------------------------------------------------------
+ // sid target
+
+#if (PROCESSES_HAVE_SID == 1)
+ o = p->sid_target;
+ if(likely(p->sid_target && p->sid_target->sid_name == p->sid_name))
+ w = p->sid_target;
+ else
+ w = p->sid_target = get_sid_target(p->sid_name);
+
+ aggregate_pid_on_target(w, p, o);
+#endif
+
+ // --------------------------------------------------------------------
+ // aggregate all file descriptors
+
+#if (PROCESSES_HAVE_FDS == 1)
+ if(enable_file_charts)
+ aggregate_pid_fds_on_targets(p);
+#endif
+ }
+
+ cleanup_exited_pids();
+}
diff --git a/src/collectors/apps.plugin/apps_functions.c b/src/collectors/apps.plugin/apps_functions.c
index 54eaeeb90..6f8d1dc38 100644
--- a/src/collectors/apps.plugin/apps_functions.c
+++ b/src/collectors/apps.plugin/apps_functions.c
@@ -24,28 +24,42 @@ static void apps_plugin_function_processes_help(const char *transaction) {
" category:NAME\n"
" Shows only processes that are assigned the category `NAME` in apps_groups.conf\n"
"\n"
+ " parent:NAME\n"
+ " Shows only processes that are aggregated under parent `NAME`\n"
+ "\n"
+#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1)
" user:NAME\n"
" Shows only processes that are running as user name `NAME`.\n"
"\n"
+#endif
+#if (PROCESSES_HAVE_GID == 1)
" group:NAME\n"
" Shows only processes that are running as group name `NAME`.\n"
"\n"
+#endif
" process:NAME\n"
" Shows only processes that their Command is `NAME` or their parent's Command is `NAME`.\n"
"\n"
" pid:NUMBER\n"
" Shows only processes that their PID is `NUMBER` or their parent's PID is `NUMBER`\n"
"\n"
+#if (PROCESSES_HAVE_UID == 1)
" uid:NUMBER\n"
" Shows only processes that their UID is `NUMBER`\n"
"\n"
+#endif
+#if (PROCESSES_HAVE_GID == 1)
" gid:NUMBER\n"
" Shows only processes that their GID is `NUMBER`\n"
"\n"
+#endif
"Filters can be combined. Each filter can be given only one time.\n"
);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
+ wb->response_code = HTTP_RESP_OK;
+ wb->content_type = CT_TEXT_PLAIN;
+ wb->expires = now_realtime_sec() + 3600;
+ pluginsd_function_result_to_stdout(transaction, wb);
buffer_free(wb);
}
@@ -69,21 +83,26 @@ void function_processes(const char *transaction, char *function,
struct pid_stat *p;
bool show_cmdline = http_access_user_has_enough_access_level_for_endpoint(
- access,
- HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA |
- HTTP_ACCESS_VIEW_AGENT_CONFIG) || enable_function_cmdline;
+ access, HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA | HTTP_ACCESS_VIEW_AGENT_CONFIG) || enable_function_cmdline;
char *words[PLUGINSD_MAX_WORDS] = { NULL };
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS);
+ size_t num_words = quoted_strings_splitter_whitespace(function, words, PLUGINSD_MAX_WORDS);
- struct target *category = NULL, *user = NULL, *group = NULL;
+ struct target *category = NULL, *user = NULL, *group = NULL; (void)category; (void)user; (void)group;
+#if (PROCESSES_HAVE_UID == 1)
+ struct target *users_sid_root = users_root_target;
+#endif
+#if (PROCESSES_HAVE_SID == 1)
+ struct target *users_sid_root = sids_root_target;
+#endif
const char *process_name = NULL;
pid_t pid = 0;
- uid_t uid = 0;
- gid_t gid = 0;
+ uid_t uid = 0; (void)uid;
+ gid_t gid = 0; (void)gid;
bool info = false;
bool filter_pid = false, filter_uid = false, filter_gid = false;
+ (void)filter_uid; (void)filter_gid;
for(int i = 1; i < PLUGINSD_MAX_WORDS ;i++) {
const char *keyword = get_word(words, num_words, i);
@@ -97,14 +116,17 @@ void function_processes(const char *transaction, char *function,
return;
}
}
+#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1)
else if(!user && strncmp(keyword, PROCESS_FILTER_USER, strlen(PROCESS_FILTER_USER)) == 0) {
- user = find_target_by_name(users_root_target, &keyword[strlen(PROCESS_FILTER_USER)]);
+ user = find_target_by_name(users_sid_root, &keyword[strlen(PROCESS_FILTER_USER)]);
if(!user) {
pluginsd_function_json_error_to_stdout(transaction, HTTP_RESP_BAD_REQUEST,
"No user with that name found.");
return;
}
}
+#endif
+#if (PROCESSES_HAVE_GID == 1)
else if(strncmp(keyword, PROCESS_FILTER_GROUP, strlen(PROCESS_FILTER_GROUP)) == 0) {
group = find_target_by_name(groups_root_target, &keyword[strlen(PROCESS_FILTER_GROUP)]);
if(!group) {
@@ -113,6 +135,7 @@ void function_processes(const char *transaction, char *function,
return;
}
}
+#endif
else if(!process_name && strncmp(keyword, PROCESS_FILTER_PROCESS, strlen(PROCESS_FILTER_PROCESS)) == 0) {
process_name = &keyword[strlen(PROCESS_FILTER_PROCESS)];
}
@@ -120,14 +143,18 @@ void function_processes(const char *transaction, char *function,
pid = str2i(&keyword[strlen(PROCESS_FILTER_PID)]);
filter_pid = true;
}
+#if (PROCESSES_HAVE_UID == 1)
else if(!uid && strncmp(keyword, PROCESS_FILTER_UID, strlen(PROCESS_FILTER_UID)) == 0) {
uid = str2i(&keyword[strlen(PROCESS_FILTER_UID)]);
filter_uid = true;
}
+#endif
+#if (PROCESSES_HAVE_GID == 1)
else if(!gid && strncmp(keyword, PROCESS_FILTER_GID, strlen(PROCESS_FILTER_GID)) == 0) {
gid = str2i(&keyword[strlen(PROCESS_FILTER_GID)]);
filter_gid = true;
}
+#endif
else if(strcmp(keyword, "help") == 0) {
apps_plugin_function_processes_help(transaction);
return;
@@ -137,10 +164,6 @@ void function_processes(const char *transaction, char *function,
}
}
- unsigned int cpu_divisor = time_factor * RATES_DETAIL / 100;
- unsigned int memory_divisor = 1024;
- unsigned int io_divisor = 1024 * RATES_DETAIL;
-
BUFFER *wb = buffer_create(4096, NULL);
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
@@ -153,38 +176,71 @@ void function_processes(const char *transaction, char *function,
if(info)
goto close_and_send;
+ uint64_t cpu_divisor = NSEC_PER_SEC / 100;
+ unsigned int memory_divisor = 1024 * 1024;
+ unsigned int io_divisor = 1024 * RATES_DETAIL;
+
+ uint64_t total_memory_bytes = OS_FUNCTION(apps_os_get_total_memory)();
+
NETDATA_DOUBLE
- UserCPU_max = 0.0
+ UserCPU_max = 0.0
, SysCPU_max = 0.0
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
, GuestCPU_max = 0.0
+#endif
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
, CUserCPU_max = 0.0
, CSysCPU_max = 0.0
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
, CGuestCPU_max = 0.0
+#endif
+#endif
, CPU_max = 0.0
, VMSize_max = 0.0
, RSS_max = 0.0
+#if (PROCESSES_HAVE_VMSHARED == 1)
, Shared_max = 0.0
+#endif
, Swap_max = 0.0
, Memory_max = 0.0
+#if (PROCESSES_HAVE_FDS == 1) && (PROCESSES_HAVE_PID_LIMITS == 1)
, FDsLimitPercent_max = 0.0
+#endif
;
unsigned long long
Processes_max = 0
, Threads_max = 0
+#if (PROCESSES_HAVE_VOLCTX == 1)
, VoluntaryCtxtSwitches_max = 0
+#endif
+#if (PROCESSES_HAVE_NVOLCTX == 1)
, NonVoluntaryCtxtSwitches_max = 0
+#endif
, Uptime_max = 0
, MinFlt_max = 0
- , CMinFlt_max = 0
- , TMinFlt_max = 0
+#if (PROCESSES_HAVE_MAJFLT == 1)
, MajFlt_max = 0
+#endif
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ , CMinFlt_max = 0
, CMajFlt_max = 0
+ , TMinFlt_max = 0
, TMajFlt_max = 0
+#endif
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
+ , LReads_max = 0
+ , LWrites_max = 0
+#endif
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
, PReads_max = 0
, PWrites_max = 0
- , RCalls_max = 0
- , WCalls_max = 0
+#endif
+#if (PROCESSES_HAVE_IO_CALLS == 1)
+ , ROps_max = 0
+ , WOps_max = 0
+#endif
+#if (PROCESSES_HAVE_FDS == 1)
, Files_max = 0
, Pipes_max = 0
, Sockets_max = 0
@@ -195,40 +251,52 @@ void function_processes(const char *transaction, char *function,
, EvPollFDs_max = 0
, OtherFDs_max = 0
, FDs_max = 0
+#endif
+#if (PROCESSES_HAVE_HANDLES == 1)
+ , Handles_max = 0
+#endif
;
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- unsigned long long
- LReads_max = 0
- , LWrites_max = 0
- ;
-#endif // !__FreeBSD__ !__APPLE_
+ netdata_mutex_lock(&apps_and_stdout_mutex);
int rows= 0;
- for(p = root_of_pids; p ; p = p->next) {
+ for(p = root_of_pids(); p ; p = p->next) {
if(!p->updated)
continue;
if(category && p->target != category)
continue;
- if(user && p->user_target != user)
+#if (PROCESSES_HAVE_UID == 1)
+ if(user && p->uid_target != user)
continue;
+#endif
- if(group && p->group_target != group)
+#if (PROCESSES_HAVE_GID == 1)
+ if(group && p->gid_target != group)
continue;
+#endif
- if(process_name && ((strcmp(p->comm, process_name) != 0 && !p->parent) || (p->parent && strcmp(p->comm, process_name) != 0 && strcmp(p->parent->comm, process_name) != 0)))
+#if (PROCESSES_HAVE_SID == 1)
+ if(user && p->sid_target != user)
+ continue;
+#endif
+
+ if(process_name && ((strcmp(pid_stat_comm(p), process_name) != 0 && !p->parent) || (p->parent && strcmp(pid_stat_comm(p), process_name) != 0 && strcmp(pid_stat_comm(p->parent), process_name) != 0)))
continue;
if(filter_pid && p->pid != pid && p->ppid != pid)
continue;
+#if (PROCESSES_HAVE_UID == 1)
if(filter_uid && p->uid != uid)
continue;
+#endif
+#if (PROCESSES_HAVE_GID == 1)
if(filter_gid && p->gid != gid)
continue;
+#endif
rows++;
@@ -241,80 +309,130 @@ void function_processes(const char *transaction, char *function,
buffer_json_add_array_item_uint64(wb, p->pid);
// cmd
- buffer_json_add_array_item_string(wb, p->comm);
+ buffer_json_add_array_item_string(wb, string2str(p->comm));
+
+#if (PROCESSES_HAVE_COMM_AND_NAME == 1)
+ // name
+ buffer_json_add_array_item_string(wb, string2str(p->name ? p->name : p->comm));
+#endif
// cmdline
if (show_cmdline) {
- buffer_json_add_array_item_string(wb, (p->cmdline && *p->cmdline) ? p->cmdline : p->comm);
+ buffer_json_add_array_item_string(wb, (string_strlen(p->cmdline)) ? pid_stat_cmdline(p) : pid_stat_comm(p));
}
// ppid
buffer_json_add_array_item_uint64(wb, p->ppid);
// category
- buffer_json_add_array_item_string(wb, p->target ? p->target->name : "-");
+ buffer_json_add_array_item_string(wb, p->target ? string2str(p->target->name) : "-");
+#if (PROCESSES_HAVE_UID == 1)
// user
- buffer_json_add_array_item_string(wb, p->user_target ? p->user_target->name : "-");
+ buffer_json_add_array_item_string(wb, p->uid_target ? string2str(p->uid_target->name) : "-");
// uid
buffer_json_add_array_item_uint64(wb, p->uid);
+#endif
+#if (PROCESSES_HAVE_SID == 1)
+ // account
+ buffer_json_add_array_item_string(wb, p->sid_target ? string2str(p->sid_target->name) : "-");
+#endif
+#if (PROCESSES_HAVE_GID == 1)
// group
- buffer_json_add_array_item_string(wb, p->group_target ? p->group_target->name : "-");
+ buffer_json_add_array_item_string(wb, p->gid_target ? string2str(p->gid_target->name) : "-");
// gid
buffer_json_add_array_item_uint64(wb, p->gid);
+#endif
// CPU utilization %
- add_value_field_ndd_with_max(wb, CPU, (NETDATA_DOUBLE)(p->utime + p->stime + p->gtime + p->cutime + p->cstime + p->cgtime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, UserCPU, (NETDATA_DOUBLE)(p->utime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, SysCPU, (NETDATA_DOUBLE)(p->stime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, GuestCPU, (NETDATA_DOUBLE)(p->gtime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, CUserCPU, (NETDATA_DOUBLE)(p->cutime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, CSysCPU, (NETDATA_DOUBLE)(p->cstime) / cpu_divisor);
- add_value_field_ndd_with_max(wb, CGuestCPU, (NETDATA_DOUBLE)(p->cgtime) / cpu_divisor);
+ kernel_uint_t total_cpu = p->values[PDF_UTIME] + p->values[PDF_STIME];
+
+#if (PROCESSES_HAVE_CPU_GUEST_TIME)
+ total_cpu += p->values[PDF_GTIME];
+#endif
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME)
+ total_cpu += p->values[PDF_CUTIME] + p->values[PDF_CSTIME];
+#if (PROCESSES_HAVE_CPU_GUEST_TIME)
+ total_cpu += p->values[PDF_CGTIME];
+#endif
+#endif
+ add_value_field_ndd_with_max(wb, CPU, (NETDATA_DOUBLE)(total_cpu) / cpu_divisor);
+ add_value_field_ndd_with_max(wb, UserCPU, (NETDATA_DOUBLE)(p->values[PDF_UTIME]) / cpu_divisor);
+ add_value_field_ndd_with_max(wb, SysCPU, (NETDATA_DOUBLE)(p->values[PDF_STIME]) / cpu_divisor);
+#if (PROCESSES_HAVE_CPU_GUEST_TIME)
+ add_value_field_ndd_with_max(wb, GuestCPU, (NETDATA_DOUBLE)(p->values[PDF_GTIME]) / cpu_divisor);
+#endif
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME)
+ add_value_field_ndd_with_max(wb, CUserCPU, (NETDATA_DOUBLE)(p->values[PDF_CUTIME]) / cpu_divisor);
+ add_value_field_ndd_with_max(wb, CSysCPU, (NETDATA_DOUBLE)(p->values[PDF_CSTIME]) / cpu_divisor);
+#if (PROCESSES_HAVE_CPU_GUEST_TIME)
+ add_value_field_ndd_with_max(wb, CGuestCPU, (NETDATA_DOUBLE)(p->values[PDF_CGTIME]) / cpu_divisor);
+#endif
+#endif
- add_value_field_llu_with_max(wb, VoluntaryCtxtSwitches, p->status_voluntary_ctxt_switches / RATES_DETAIL);
- add_value_field_llu_with_max(wb, NonVoluntaryCtxtSwitches, p->status_nonvoluntary_ctxt_switches / RATES_DETAIL);
+#if (PROCESSES_HAVE_VOLCTX == 1)
+ add_value_field_llu_with_max(wb, VoluntaryCtxtSwitches, p->values[PDF_VOLCTX] / RATES_DETAIL);
+#endif
+#if (PROCESSES_HAVE_NVOLCTX == 1)
+ add_value_field_llu_with_max(wb, NonVoluntaryCtxtSwitches, p->values[PDF_NVOLCTX] / RATES_DETAIL);
+#endif
// memory MiB
- if(MemTotal)
- add_value_field_ndd_with_max(wb, Memory, (NETDATA_DOUBLE)p->status_vmrss * 100.0 / (NETDATA_DOUBLE)MemTotal);
+ if(total_memory_bytes)
+ add_value_field_ndd_with_max(wb, Memory, (NETDATA_DOUBLE)p->values[PDF_VMRSS] * 100.0 / (NETDATA_DOUBLE)total_memory_bytes);
+
+ add_value_field_ndd_with_max(wb, RSS, (NETDATA_DOUBLE)p->values[PDF_VMRSS] / memory_divisor);
- add_value_field_ndd_with_max(wb, RSS, (NETDATA_DOUBLE)p->status_vmrss / memory_divisor);
- add_value_field_ndd_with_max(wb, Shared, (NETDATA_DOUBLE)p->status_vmshared / memory_divisor);
-#if !defined(__APPLE__)
- add_value_field_ndd_with_max(wb, VMSize, (NETDATA_DOUBLE)p->status_vmsize / memory_divisor);
+#if (PROCESSES_HAVE_VMSHARED == 1)
+ add_value_field_ndd_with_max(wb, Shared, (NETDATA_DOUBLE)p->values[PDF_VMSHARED] / memory_divisor);
#endif
- add_value_field_ndd_with_max(wb, Swap, (NETDATA_DOUBLE)p->status_vmswap / memory_divisor);
+ add_value_field_ndd_with_max(wb, VMSize, (NETDATA_DOUBLE)p->values[PDF_VMSIZE] / memory_divisor);
+#if (PROCESSES_HAVE_VMSWAP == 1)
+ add_value_field_ndd_with_max(wb, Swap, (NETDATA_DOUBLE)p->values[PDF_VMSWAP] / memory_divisor);
+#endif
+
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
// Physical I/O
- add_value_field_llu_with_max(wb, PReads, p->io_storage_bytes_read / io_divisor);
- add_value_field_llu_with_max(wb, PWrites, p->io_storage_bytes_written / io_divisor);
+ add_value_field_llu_with_max(wb, PReads, p->values[PDF_PREAD] / io_divisor);
+ add_value_field_llu_with_max(wb, PWrites, p->values[PDF_PWRITE] / io_divisor);
+#endif
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
// Logical I/O
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- add_value_field_llu_with_max(wb, LReads, p->io_logical_bytes_read / io_divisor);
- add_value_field_llu_with_max(wb, LWrites, p->io_logical_bytes_written / io_divisor);
+ add_value_field_llu_with_max(wb, LReads, p->values[PDF_LREAD] / io_divisor);
+ add_value_field_llu_with_max(wb, LWrites, p->values[PDF_LWRITE] / io_divisor);
#endif
+#if (PROCESSES_HAVE_IO_CALLS == 1)
// I/O calls
- add_value_field_llu_with_max(wb, RCalls, p->io_read_calls / RATES_DETAIL);
- add_value_field_llu_with_max(wb, WCalls, p->io_write_calls / RATES_DETAIL);
+ add_value_field_llu_with_max(wb, ROps, p->values[PDF_OREAD] / RATES_DETAIL);
+ add_value_field_llu_with_max(wb, WOps, p->values[PDF_OWRITE] / RATES_DETAIL);
+#endif
// minor page faults
- add_value_field_llu_with_max(wb, MinFlt, p->minflt / RATES_DETAIL);
- add_value_field_llu_with_max(wb, CMinFlt, p->cminflt / RATES_DETAIL);
- add_value_field_llu_with_max(wb, TMinFlt, (p->minflt + p->cminflt) / RATES_DETAIL);
+ add_value_field_llu_with_max(wb, MinFlt, p->values[PDF_MINFLT] / RATES_DETAIL);
+#if (PROCESSES_HAVE_MAJFLT == 1)
// major page faults
- add_value_field_llu_with_max(wb, MajFlt, p->majflt / RATES_DETAIL);
- add_value_field_llu_with_max(wb, CMajFlt, p->cmajflt / RATES_DETAIL);
- add_value_field_llu_with_max(wb, TMajFlt, (p->majflt + p->cmajflt) / RATES_DETAIL);
+ add_value_field_llu_with_max(wb, MajFlt, p->values[PDF_MAJFLT] / RATES_DETAIL);
+#endif
+
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ add_value_field_llu_with_max(wb, CMinFlt, p->values[PDF_CMINFLT] / RATES_DETAIL);
+ add_value_field_llu_with_max(wb, CMajFlt, p->values[PDF_CMAJFLT] / RATES_DETAIL);
+ add_value_field_llu_with_max(wb, TMinFlt, (p->values[PDF_MINFLT] + p->values[PDF_CMINFLT]) / RATES_DETAIL);
+ add_value_field_llu_with_max(wb, TMajFlt, (p->values[PDF_MAJFLT] + p->values[PDF_CMAJFLT]) / RATES_DETAIL);
+#endif
+#if (PROCESSES_HAVE_FDS == 1)
// open file descriptors
+#if (PROCESSES_HAVE_PID_LIMITS == 1)
add_value_field_ndd_with_max(wb, FDsLimitPercent, p->openfds_limits_percent);
+#endif
add_value_field_llu_with_max(wb, FDs, pid_openfds_sum(p));
add_value_field_llu_with_max(wb, Files, p->openfds.files);
add_value_field_llu_with_max(wb, Pipes, p->openfds.pipes);
@@ -325,12 +443,16 @@ void function_processes(const char *transaction, char *function,
add_value_field_llu_with_max(wb, SigFDs, p->openfds.signalfds);
add_value_field_llu_with_max(wb, EvPollFDs, p->openfds.eventpolls);
add_value_field_llu_with_max(wb, OtherFDs, p->openfds.other);
+#endif
+#if (PROCESSES_HAVE_HANDLES == 1)
+ add_value_field_llu_with_max(wb, Handles, p->values[PDF_HANDLES]);
+#endif
// processes, threads, uptime
- add_value_field_llu_with_max(wb, Processes, p->children_count);
- add_value_field_llu_with_max(wb, Threads, p->num_threads);
- add_value_field_llu_with_max(wb, Uptime, p->uptime);
+ add_value_field_llu_with_max(wb, Processes, p->values[PDF_PROCESSES]);
+ add_value_field_llu_with_max(wb, Threads, p->values[PDF_THREADS]);
+ add_value_field_llu_with_max(wb, Uptime, p->values[PDF_UPTIME]);
buffer_json_array_close(wb); // for each pid
}
@@ -357,6 +479,14 @@ void function_processes(const char *transaction, char *function,
RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
+#if (PROCESSES_HAVE_COMM_AND_NAME == 1)
+ buffer_rrdf_table_add_field(wb, field_id++, "Name", "Process Friendly Name", RRDF_FIELD_TYPE_STRING,
+ RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
+ RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
+ RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
+#endif
+
if (show_cmdline) {
buffer_rrdf_table_add_field(wb, field_id++, "CmdLine", "Command Line", RRDF_FIELD_TYPE_STRING,
RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0,
@@ -370,22 +500,30 @@ void function_processes(const char *transaction, char *function,
NAN, RRDF_FIELD_SORT_ASCENDING, "PID", RRDF_FIELD_SUMMARY_COUNT,
RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
+
buffer_rrdf_table_add_field(wb, field_id++, "Category", "Category (apps_groups.conf)", RRDF_FIELD_TYPE_STRING,
RRDF_FIELD_VISUAL_VALUE,
RRDF_FIELD_TRANSFORM_NONE,
0, NULL, NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
+
+#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1)
buffer_rrdf_table_add_field(wb, field_id++, "User", "User Owner", RRDF_FIELD_TYPE_STRING,
RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_VISIBLE, NULL);
+#endif
+#if (PROCESSES_HAVE_UID == 1)
buffer_rrdf_table_add_field(wb, field_id++, "Uid", "User ID", RRDF_FIELD_TYPE_INTEGER, RRDF_FIELD_VISUAL_VALUE,
RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
+#endif
+
+#if (PROCESSES_HAVE_GID == 1)
buffer_rrdf_table_add_field(wb, field_id++, "Group", "Group Owner", RRDF_FIELD_TYPE_STRING,
RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
@@ -396,6 +534,7 @@ void function_processes(const char *transaction, char *function,
RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_OPTS_NONE, NULL);
+#endif
// CPU utilization
buffer_rrdf_table_add_field(wb, field_id++, "CPU", "Total CPU Time (100% = 1 core)",
@@ -413,11 +552,14 @@ void function_processes(const char *transaction, char *function,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", SysCPU_max,
RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
buffer_rrdf_table_add_field(wb, field_id++, "GuestCPU", "Guest CPU Time (100% = 1 core)",
RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", GuestCPU_max,
RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+#endif
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
buffer_rrdf_table_add_field(wb, field_id++, "CUserCPU", "Children User CPU Time (100% = 1 core)",
RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CUserCPU_max, RRDF_FIELD_SORT_DESCENDING, NULL,
@@ -428,26 +570,33 @@ void function_processes(const char *transaction, char *function,
RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CSysCPU_max, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
buffer_rrdf_table_add_field(wb, field_id++, "CGuestCPU", "Children Guest CPU Time (100% = 1 core)",
RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", CGuestCPU_max, RRDF_FIELD_SORT_DESCENDING,
NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL);
+#endif
+#endif
+#if (PROCESSES_HAVE_VOLCTX == 1)
// CPU context switches
buffer_rrdf_table_add_field(wb, field_id++, "vCtxSwitch", "Voluntary Context Switches",
RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "switches/s",
VoluntaryCtxtSwitches_max, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL);
+#endif
+#if (PROCESSES_HAVE_NVOLCTX == 1)
buffer_rrdf_table_add_field(wb, field_id++, "iCtxSwitch", "Involuntary Context Switches",
RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2, "switches/s",
NonVoluntaryCtxtSwitches_max, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE, RRDF_FIELD_OPTS_NONE, NULL);
+#endif
// memory
- if (MemTotal)
+ if (total_memory_bytes)
buffer_rrdf_table_add_field(wb, field_id++, "Memory", "Memory Percentage", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR,
RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", 100.0, RRDF_FIELD_SORT_DESCENDING, NULL,
@@ -460,25 +609,30 @@ void function_processes(const char *transaction, char *function,
2, "MiB", RSS_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
+#if (PROCESSES_HAVE_VMSHARED == 1)
buffer_rrdf_table_add_field(wb, field_id++, "Shared", "Shared Pages", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2,
"MiB", Shared_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
-#if !defined(__APPLE__)
+#endif
+
buffer_rrdf_table_add_field(wb, field_id++, "Virtual", "Virtual Memory Size", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR,
RRDF_FIELD_TRANSFORM_NUMBER, 2, "MiB", VMSize_max, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
-#endif
+
+#if (PROCESSES_HAVE_VMSWAP == 1)
buffer_rrdf_table_add_field(wb, field_id++, "Swap", "Swap Memory", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2,
"MiB",
Swap_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+#endif
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
// Physical I/O
buffer_rrdf_table_add_field(wb, field_id++, "PReads", "Physical I/O Reads", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
@@ -490,33 +644,41 @@ void function_processes(const char *transaction, char *function,
RRDF_FIELD_TRANSFORM_NUMBER, 2, "KiB/s", PWrites_max, RRDF_FIELD_SORT_DESCENDING,
NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_VISIBLE, NULL);
+#endif
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
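+ // show the logical I/O columns by default only when physical I/O is not available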
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
+ RRDF_FIELD_OPTIONS logical_io_options = RRDF_FIELD_OPTS_NONE;
+#else
+ RRDF_FIELD_OPTIONS logical_io_options = RRDF_FIELD_OPTS_VISIBLE;
+#endif
// Logical I/O
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
buffer_rrdf_table_add_field(wb, field_id++, "LReads", "Logical I/O Reads", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER,
2, "KiB/s", LReads_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
+ logical_io_options, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "LWrites", "Logical I/O Writes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR,
RRDF_FIELD_TRANSFORM_NUMBER,
2, "KiB/s", LWrites_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
+ logical_io_options, NULL);
#endif
+#if (PROCESSES_HAVE_IO_CALLS == 1)
// I/O calls
- buffer_rrdf_table_add_field(wb, field_id++, "RCalls", "I/O Read Calls", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
+ buffer_rrdf_table_add_field(wb, field_id++, "ROps", "I/O Read Operations", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2,
- "calls/s", RCalls_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
+ "ops/s", ROps_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "WCalls", "I/O Write Calls", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
+ buffer_rrdf_table_add_field(wb, field_id++, "WOps", "I/O Write Operations", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 2,
- "calls/s", WCalls_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
+ "ops/s", WOps_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+#endif
// minor page faults
buffer_rrdf_table_add_field(wb, field_id++, "MinFlt", "Minor Page Faults/s", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
@@ -525,18 +687,8 @@ void function_processes(const char *transaction, char *function,
2, "pgflts/s", MinFlt_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "CMinFlt", "Children Minor Page Faults/s",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
- RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", CMinFlt_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
- buffer_rrdf_table_add_field(wb, field_id++, "TMinFlt", "Total Minor Page Faults/s",
- RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
- RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", TMinFlt_max, RRDF_FIELD_SORT_DESCENDING,
- NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
- RRDF_FIELD_OPTS_NONE, NULL);
+#if (PROCESSES_HAVE_MAJFLT == 1)
// major page faults
buffer_rrdf_table_add_field(wb, field_id++, "MajFlt", "Major Page Faults/s", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR,
@@ -544,24 +696,42 @@ void function_processes(const char *transaction, char *function,
2, "pgflts/s", MajFlt_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+#endif
+
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ buffer_rrdf_table_add_field(wb, field_id++, "CMinFlt", "Children Minor Page Faults/s",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
+ RRDF_FIELD_VISUAL_BAR,
+ RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", CMinFlt_max, RRDF_FIELD_SORT_DESCENDING,
+ NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
+ RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "CMajFlt", "Children Major Page Faults/s",
RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
RRDF_FIELD_VISUAL_BAR,
RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", CMajFlt_max, RRDF_FIELD_SORT_DESCENDING,
NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+ buffer_rrdf_table_add_field(wb, field_id++, "TMinFlt", "Total Minor Page Faults/s",
+ RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
+ RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", TMinFlt_max, RRDF_FIELD_SORT_DESCENDING,
+ NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
+ RRDF_FIELD_OPTS_NONE, NULL);
buffer_rrdf_table_add_field(wb, field_id++, "TMajFlt", "Total Major Page Faults/s",
RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
RRDF_FIELD_TRANSFORM_NUMBER, 2, "pgflts/s", TMajFlt_max, RRDF_FIELD_SORT_DESCENDING,
NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+#endif
+#if (PROCESSES_HAVE_FDS == 1)
// open file descriptors
+#if (PROCESSES_HAVE_PID_LIMITS == 1)
buffer_rrdf_table_add_field(wb, field_id++, "FDsLimitPercent", "Percentage of Open Descriptors vs Limits",
RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
RRDF_FIELD_TRANSFORM_NUMBER, 2, "%", FDsLimitPercent_max, RRDF_FIELD_SORT_DESCENDING, NULL,
RRDF_FIELD_SUMMARY_MAX, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+#endif
buffer_rrdf_table_add_field(wb, field_id++, "FDs", "All Open File Descriptors",
RRDF_FIELD_TYPE_BAR_WITH_INTEGER, RRDF_FIELD_VISUAL_BAR,
RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", FDs_max, RRDF_FIELD_SORT_DESCENDING, NULL,
@@ -614,6 +784,16 @@ void function_processes(const char *transaction, char *function,
RRDF_FIELD_TRANSFORM_NUMBER, 0, "fds", OtherFDs_max, RRDF_FIELD_SORT_DESCENDING,
NULL, RRDF_FIELD_SUMMARY_SUM, RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_OPTS_NONE, NULL);
+#endif
+
+#if (PROCESSES_HAVE_HANDLES == 1)
+ buffer_rrdf_table_add_field(wb, field_id++, "Handles", "Open Handles", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
+ RRDF_FIELD_VISUAL_BAR, RRDF_FIELD_TRANSFORM_NUMBER, 0,
+ "handles",
+ Handles_max, RRDF_FIELD_SORT_DESCENDING, NULL, RRDF_FIELD_SUMMARY_SUM,
+ RRDF_FIELD_FILTER_RANGE,
+ RRDF_FIELD_OPTS_VISIBLE, NULL);
+#endif
// processes, threads, uptime
buffer_rrdf_table_add_field(wb, field_id++, "Processes", "Processes", RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
@@ -647,27 +827,39 @@ void function_processes(const char *transaction, char *function,
{
buffer_json_add_array_item_string(wb, "UserCPU");
buffer_json_add_array_item_string(wb, "SysCPU");
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
buffer_json_add_array_item_string(wb, "GuestCPU");
+#endif
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
buffer_json_add_array_item_string(wb, "CUserCPU");
buffer_json_add_array_item_string(wb, "CSysCPU");
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
buffer_json_add_array_item_string(wb, "CGuestCPU");
+#endif
+#endif
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
+#if (PROCESSES_HAVE_VOLCTX == 1) || (PROCESSES_HAVE_NVOLCTX == 1)
buffer_json_member_add_object(wb, "CPUCtxSwitches");
{
buffer_json_member_add_string(wb, "name", "CPU Context Switches");
buffer_json_member_add_string(wb, "type", "stacked-bar");
buffer_json_member_add_array(wb, "columns");
{
+#if (PROCESSES_HAVE_VOLCTX == 1)
buffer_json_add_array_item_string(wb, "vCtxSwitch");
+#endif
+#if (PROCESSES_HAVE_NVOLCTX == 1)
buffer_json_add_array_item_string(wb, "iCtxSwitch");
+#endif
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
+#endif
// Memory chart
buffer_json_member_add_object(wb, "Memory");
@@ -685,7 +877,7 @@ void function_processes(const char *transaction, char *function,
}
buffer_json_object_close(wb);
- if(MemTotal) {
+ if(total_memory_bytes) {
// Memory chart
buffer_json_member_add_object(wb, "MemoryPercent");
{
@@ -700,7 +892,7 @@ void function_processes(const char *transaction, char *function,
buffer_json_object_close(wb);
}
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
+#if (PROCESSES_HAVE_LOGICAL_IO == 1) || (PROCESSES_HAVE_PHYSICAL_IO == 1)
// I/O Reads chart
buffer_json_member_add_object(wb, "Reads");
{
@@ -708,8 +900,12 @@ void function_processes(const char *transaction, char *function,
buffer_json_member_add_string(wb, "type", "stacked-bar");
buffer_json_member_add_array(wb, "columns");
{
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
buffer_json_add_array_item_string(wb, "LReads");
+#endif
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
buffer_json_add_array_item_string(wb, "PReads");
+#endif
}
buffer_json_array_close(wb);
}
@@ -722,13 +918,19 @@ void function_processes(const char *transaction, char *function,
buffer_json_member_add_string(wb, "type", "stacked-bar");
buffer_json_member_add_array(wb, "columns");
{
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
buffer_json_add_array_item_string(wb, "LWrites");
+#endif
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
buffer_json_add_array_item_string(wb, "PWrites");
+#endif
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
+#endif
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
// Logical I/O chart
buffer_json_member_add_object(wb, "LogicalIO");
{
@@ -744,6 +946,7 @@ void function_processes(const char *transaction, char *function,
buffer_json_object_close(wb);
#endif
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
// Physical I/O chart
buffer_json_member_add_object(wb, "PhysicalIO");
{
@@ -757,7 +960,9 @@ void function_processes(const char *transaction, char *function,
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
+#endif
+#if (PROCESSES_HAVE_IO_CALLS == 1)
// I/O Calls chart
buffer_json_member_add_object(wb, "IOCalls");
{
@@ -765,12 +970,13 @@ void function_processes(const char *transaction, char *function,
buffer_json_member_add_string(wb, "type", "stacked-bar");
buffer_json_member_add_array(wb, "columns");
{
- buffer_json_add_array_item_string(wb, "RCalls");
+ buffer_json_add_array_item_string(wb, "ROps");
- buffer_json_add_array_item_string(wb, "WCalls");
+ buffer_json_add_array_item_string(wb, "WOps");
}
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
+#endif
// Minor Page Faults chart
buffer_json_member_add_object(wb, "MinFlt");
@@ -890,6 +1096,7 @@ void function_processes(const char *transaction, char *function,
}
buffer_json_object_close(wb);
+#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1)
// group by User
buffer_json_member_add_object(wb, "User");
{
@@ -902,7 +1109,9 @@ void function_processes(const char *transaction, char *function,
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
+#endif
+#if (PROCESSES_HAVE_GID == 1)
// group by Group
buffer_json_member_add_object(wb, "Group");
{
@@ -915,14 +1124,20 @@ void function_processes(const char *transaction, char *function,
buffer_json_array_close(wb);
}
buffer_json_object_close(wb);
+#endif
}
buffer_json_object_close(wb); // group_by
+ netdata_mutex_unlock(&apps_and_stdout_mutex);
+
close_and_send:
buffer_json_member_add_time_t(wb, "expires", now_s + update_every);
buffer_json_finalize(wb);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_s + update_every, wb);
+ wb->response_code = HTTP_RESP_OK;
+ wb->content_type = CT_APPLICATION_JSON;
+ wb->expires = now_s + update_every;
+ pluginsd_function_result_to_stdout(transaction, wb);
buffer_free(wb);
}
diff --git a/src/collectors/apps.plugin/apps_groups.conf b/src/collectors/apps.plugin/apps_groups.conf
index 724616c18..8d4b1722c 100644
--- a/src/collectors/apps.plugin/apps_groups.conf
+++ b/src/collectors/apps.plugin/apps_groups.conf
@@ -1,438 +1,247 @@
-#
-# apps.plugin process grouping
-#
-# The apps.plugin displays charts with information about the processes running.
-# This config allows grouping processes together, so that several processes
-# will be reported as one.
-#
-# Only groups in this file are reported. All other processes will be reported
-# as 'other'.
-#
-# For each process given, its whole process tree will be grouped, not just
-# the process matched. The plugin will include both parents and childs.
-#
-# The format is:
-#
-# group: process1 process2 process3 ...
-#
-# Each group can be given multiple times, to add more processes to it.
-#
-# The process names are the ones returned by:
-#
-# - ps -e or /proc/PID/stat
-# - in case of substring mode (see below): /proc/PID/cmdline
-#
-# To add process names with spaces, enclose them in quotes (single or double)
-# example: 'Plex Media Serv' "my other process".
-#
-# Note that spaces are not supported for process groups. Use a dash "-" instead.
-# example-process-group: process1 process2
-#
-# Wildcard support:
-# You can add an asterisk (*) at the beginning and/or the end of a process:
-#
-# *name suffix mode: will search for processes ending with 'name'
-# (/proc/PID/stat)
-#
-# name* prefix mode: will search for processes beginning with 'name'
-# (/proc/PID/stat)
-#
-# *name* substring mode: will search for 'name' in the whole command line
-# (/proc/PID/cmdline)
-#
-# If you enter even just one *name* (substring), apps.plugin will process
-# /proc/PID/cmdline for all processes, just once (when they are first seen).
-#
-# To add processes with single quotes, enclose them in double quotes
-# example: "process with this ' single quote"
-#
-# To add processes with double quotes, enclose them in single quotes:
-# example: 'process with this " double quote'
-#
-# If a group or process name starts with a -, the dimension will be hidden
-# (cpu chart only).
-#
-# If a process starts with a +, debugging will be enabled for it
-# (debugging produces a lot of output - do not enable it in production systems)
-#
-# You can add any number of groups you like. Only the ones found running will
-# affect the charts generated. However, producing charts with hundreds of
-# dimensions may slow down your web browser.
-#
-# The order of the entries in this list is important: the first that matches
-# a process is used, so put important ones at the top. Processes not matched
-# by any row, will inherit it from their parents or children.
-#
-# The order also controls the order of the dimensions on the generated charts
-# (although applications started after apps.plugin is started, will be appended
-# to the existing list of dimensions the netdata daemon maintains).
-
-# -----------------------------------------------------------------------------
-# NETDATA processes accounting
-
-# netdata main process
-netdata: netdata
-
-# netdata known plugins
-# plugins not defined here will be accumulated in netdata, above
-apps.plugin: apps.plugin
-freeipmi.plugin: freeipmi.plugin
-nfacct.plugin: nfacct.plugin
-cups.plugin: cups.plugin
-xenstat.plugin: xenstat.plugin
-perf.plugin: perf.plugin
-charts.d.plugin: *charts.d.plugin*
-python.d.plugin: *python.d.plugin*
-systemd-journal.plugin: *systemd-journal.plugin*
-network-viewer.plugin: *network-viewer.plugin*
-tc-qos-helper: *tc-qos-helper.sh*
-fping: fping
-ioping: ioping
-go.d.plugin: *go.d.plugin*
-slabinfo.plugin: *slabinfo.plugin*
-ebpf.plugin: *ebpf.plugin*
-debugfs.plugin: *debugfs.plugin*
-
-# agent-service-discovery
-agent_sd: agent_sd
+##
+## apps.plugin process grouping
+##
+## Documentation at:
+## https://github.com/netdata/netdata/blob/master/src/collectors/apps.plugin/README.md
+##
+## -----------------------------------------------------------------------------
+## Subprocesses of process managers are monitored individually.
+## (uncomment to add or edit - the default is also hardcoded into the plugin)
-# -----------------------------------------------------------------------------
-# authentication/authorization related servers
+## Use 'managers: clear' to discard the internal list and set your own; otherwise entries are appended to it.
+#managers: clear
-auth: radius* openldap* ldap* slapd authelia sssd saslauthd polkitd gssproxy
-fail2ban: fail2ban*
+## Linux process managers
+#managers: init systemd containerd-shim-runc-v2 dumb-init gnome-shell docker-init tini
+#managers: spawn-plugins openrc-run.sh crond plasmashell xfwm4
-# -----------------------------------------------------------------------------
-# web/ftp servers
+## FreeBSD process managers
+#managers: init spawn-plugins
-httpd: apache* httpd nginx* lighttpd hiawatha caddy h2o
-proxy: squid* c-icap squidGuard varnish*
-php: php* lsphp*
-ftpd: proftpd in.tftpd vsftpd
-uwsgi: uwsgi
-unicorn: *unicorn*
-puma: *puma*
+## MacOS process managers
+#managers: launchd spawn-plugins
-# -----------------------------------------------------------------------------
-# database servers
+## Windows process managers
+#managers: wininit services explorer System netdata
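+## Children of the managers listed here are treated as independent processes,
+## so each one is matched and grouped on its own rather than under its manager.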
-sql: mysqld* mariad* postgres* postmaster* oracle_* ora_* sqlservr
-nosql: mongod redis* valkey* memcached *couchdb*
-timedb: prometheus *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py* *net.opentsdb.tools.TSDMain* influxd*
+## -----------------------------------------------------------------------------
+## Interpreters for which the actual command name is taken from the command line.
+## (uncomment to add or edit - the default is also hardcoded into the plugin)
-clickhouse: clickhouse-serv* clickhouse-cli* clckhouse-watch
+## Use 'interpreters: clear' to discard the internal list and set your own; otherwise entries are appended to it.
+#interpreters: clear
-# -----------------------------------------------------------------------------
-# email servers
+#interpreters: python python2 python3
+#interpreters: sh bash zsh
+#interpreters: node perl awk
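+## When a process is started by one of these interpreters (e.g. 'python3 app.py',
+## where 'app.py' is a hypothetical script), the script name from the command
+## line is used as the command to match, instead of the interpreter itself.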
-mta: amavis* zmstat-* zmdiaglog zmmailboxdmgr opendkim postfwd2 smtp* lmtp* sendmail postfix master pickup qmgr showq tlsmgr postscreen oqmgr msmtp* nullmailer*
-mda: dovecot *imapd *pop3d *popd
+## -----------------------------------------------------------------------------
+## Processes of interest
+## Group and/or rename individual processes.
+## (there is no internal default for this section)
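+##
+## For example (hypothetical names), to report a server and its worker
+## processes as a single group:
+#myapp: myapp-server *myapp-worker*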
-# -----------------------------------------------------------------------------
-# network, routing, VPN
+## NETDATA processes accounting
+netdata: netdata
-ppp: ppp*
-vpn: openvpn pptp* cjdroute gvpe tincd wireguard tailscaled
-wifi: hostapd wpa_supplicant
-routing: ospfd* ospf6d* bgpd bfdd fabricd isisd eigrpd sharpd staticd ripd ripngd pimd pbrd nhrpd ldpd zebra vrrpd vtysh bird*
-modem: ModemManager
-netmanager: NetworkManager nm* systemd-networkd networkctl netplan connmand wicked* avahi-autoipd networkd-dispatcher
-firewall: firewalld ufw nft
-tor: tor
-bluetooth: bluetooth bluetoothd bluez bluedevil obexd
+## NETDATA agent-service-discovery (kubernetes)
+agent_sd: agent_sd
-# -----------------------------------------------------------------------------
-# high availability and balancers
+## -----------------------------------------------------------------------------
+oracledb: ora_* oracle_* *runOracle.sh*
+unicorn: *unicorn*
+puma: *puma*
+couchdb: *couchdb*
+graphite: *carbon-cache.py* *carbon-aggregator.py* *graphite/manage.py*
+opentsdb: *net.opentsdb.tools.TSDMain*
+imapd: *imapd
+pop3d: *pop3d
+popd: *popd
camo: *camo*
-balancer: ipvs_* haproxy
-ha: corosync hs_logd ha_logd stonithd pacemakerd lrmd crmd keepalived ucarp*
-
-# -----------------------------------------------------------------------------
-# telephony
-
-pbx: asterisk safe_asterisk *vicidial*
-sip: opensips* stund
-
-# -----------------------------------------------------------------------------
-# chat
-
-chat: irssi *vines* *prosody* murmurd
-
-# -----------------------------------------------------------------------------
-# monitoring
-
-logs: ulogd* syslog* rsyslog* logrotate *systemd-journal* rotatelogs sysklogd metalog
-nms: snmpd vnstatd smokeping zabbix* munin* mon openhpid tailon nrpe
-monit: monit
-splunk: splunkd
+vicidial: *vicidial*
+vines: *vines*
+prosody: *prosody*
azure: mdsd *waagent* *omiserver* *omiagent* hv_kvp_daemon hv_vss_daemon *auoms* *omsagent*
datadog: *datadog*
-edgedelta: edgedelta
newrelic: newrelic*
google-agent: *google_guest_agent* *google_osconfig_agent*
-nvidia-smi: nvidia-smi
-intel_gpu_top: intel_gpu_top
-htop: htop
-watchdog: watchdog
-telegraf: telegraf
-grafana: grafana*
-
-# -----------------------------------------------------------------------------
-# storage, file systems and file servers
-
-ceph: ceph-* ceph_* radosgw* rbd-* cephfs-* osdmaptool crushtool
-samba: smbd nmbd winbindd ctdbd ctdb-* ctdb_*
-nfs: rpcbind rpc.* nfs*
-zfs: spl_* z_* txg_* zil_* arc_* l2arc*
-btrfs: btrfs*
-iscsi: iscsid iscsi_eh
-afp: netatalk afpd cnid_dbd cnid_metad
-ntfs-3g: ntfs-3g
-
-# -----------------------------------------------------------------------------
-# kubernetes
-
-kubelet: kubelet
-kube-dns: kube-dns
-kube-proxy: kube-proxy
-metrics-server: metrics-server
-heapster: heapster
-
-# -----------------------------------------------------------------------------
-# AWS
-
aws-s3: '*aws s3*' s3cmd s5cmd
-aws: aws
-
-# -----------------------------------------------------------------------------
-# virtualization platform
-
proxmox-ve: pve* spiceproxy
-
-# -----------------------------------------------------------------------------
-# containers & virtual machines
-
-containers: lxc* docker* balena* containerd
-VMs: vbox* VBox* qemu* kvm*
libvirt: virtlogd virtqemud virtstoraged virtnetworkd virtlockd virtinterfaced
libvirt: virtnodedevd virtproxyd virtsecretd libvirtd
guest-agent: qemu-ga spice-vdagent cloud-init*
-
-# -----------------------------------------------------------------------------
-# ssh servers and clients
-
-ssh: ssh* scp sftp* dropbear
-
-# -----------------------------------------------------------------------------
-# print servers and clients
-
-print: cups* lpd lpq
-
-# -----------------------------------------------------------------------------
-# time servers and clients
-
-time: ntp* systemd-timesyn* chronyd ptp*
-
-# -----------------------------------------------------------------------------
-# dhcp servers and clients
-
-dhcp: *dhcp* dhclient
-
-# -----------------------------------------------------------------------------
-# name servers and clients
-
-dns: named unbound nsd pdns_server knotd gdnsd yadifad dnsmasq *systemd-resolve* pihole* avahi-daemon avahi-dnsconfd
-dnsdist: dnsdist
-
-# -----------------------------------------------------------------------------
-# installation / compilation / debugging
+dhcp: dhcp* dhclient
build: cc1 cc1plus as gcc* cppcheck ld make cmake automake autoconf autoreconf
build: cargo rustc bazel buck git gdb valgrind* rpmbuild dpkg-buildpackage
-
-# -----------------------------------------------------------------------------
-# package management
-
-packagemanager: apt* dpkg* dselect dnf yum rpm zypp* yast* pacman xbps* swupd* emerge*
-packagemanager: packagekitd pkgin pkg apk snapd slackpkg slapt-get
-
-# -----------------------------------------------------------------------------
-# antivirus
-
-antivirus: clam* *clam imunify360*
-
-# -----------------------------------------------------------------------------
-# torrent clients
-
-torrents: *deluge* transmission* *SickBeard* *CouchPotato* *rtorrent*
-
-# -----------------------------------------------------------------------------
-# backup servers and clients
-
+packagemanager: apt* dpkg* dselect dnf yum rpm zypp* yast* pacman xbps* swupd*
+packagemanager: packagekitd pkgin pkg apk snapd slackpkg slapt-get emerge*
+clam: clam* *clam
backup: rsync lsyncd bacula* borg rclone
-
-# -----------------------------------------------------------------------------
-# cron
-
cron: cron* atd anacron *systemd-cron* incrond
-
-# -----------------------------------------------------------------------------
-# UPS
-
ups: upsmon upsd */nut/* apcupsd
-# -----------------------------------------------------------------------------
-# media players, servers, clients
-
-media: mplayer vlc xine mediatomb omxplayer* kodi* xbmc* mediacenter eventlircd
-media: mpd minidlnad mt-daapd Plex* jellyfin squeeze* jackett Ombi
-media: strawberry* clementine*
+rabbitmq: *rabbitmq*
+sidekiq: *sidekiq*
+vernemq: *beam.smp*vernemq* *start_vernemq* *run_erl*vernemq* *vernemq*epmd*
+erlang: beam.smp
+postfix: *postfix*
-audio: pulse* pipewire wireplumber jack*
+dagster: *dagster*
-# -----------------------------------------------------------------------------
-# java applications
+## -----------------------------------------------------------------------------
+## java applications
hdfsdatanode: *org.apache.hadoop.hdfs.server.datanode.DataNode*
hdfsnamenode: *org.apache.hadoop.hdfs.server.namenode.NameNode*
hdfsjournalnode: *org.apache.hadoop.hdfs.qjournal.server.JournalNode*
hdfszkfc: *org.apache.hadoop.hdfs.tools.DFSZKFailoverController*
-
yarnnode: *org.apache.hadoop.yarn.server.nodemanager.NodeManager*
yarnmgr: *org.apache.hadoop.yarn.server.resourcemanager.ResourceManager*
yarnproxy: *org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer*
-
sparkworker: *org.apache.spark.deploy.worker.Worker*
sparkmaster: *org.apache.spark.deploy.master.Master*
-
hbaseregion: *org.apache.hadoop.hbase.regionserver.HRegionServer*
hbaserest: *org.apache.hadoop.hbase.rest.RESTServer*
hbasethrift: *org.apache.hadoop.hbase.thrift.ThriftServer*
hbasemaster: *org.apache.hadoop.hbase.master.HMaster*
-
zookeeper: *org.apache.zookeeper.server.quorum.QuorumPeerMain*
-
hive2: *org.apache.hive.service.server.HiveServer2*
hivemetastore: *org.apache.hadoop.hive.metastore.HiveMetaStore*
-
solr: *solr.install.dir*
-
airflow: *airflow*
+kafka: *kafka.Kafka*
-# -----------------------------------------------------------------------------
-# GUI
-
-X: X Xorg xinit xdm Xwayland xsettingsd touchegg
-wayland: swaylock swayidle waypipe wayvnc
-kde: *kdeinit* kdm sddm plasmashell startplasma-* kwin* kwallet* krunner kactivitymanager*
-gnome: gnome-* gdm gconf* mutter
-mate: mate-* msd-* marco*
-cinnamon: cinnamon* muffin
-xfce: xfwm4 xfdesktop xfce* Thunar xfsettingsd xfconf*
-lxde: lxde* startlxde lxdm lxappearance* lxlauncher* lxpanel* lxsession* lxsettings*
-lxqt: lxqt* startlxqt
-enlightenment: entrance enlightenment*
-i3: i3*
-awesome: awesome awesome-client
-dwm: dwm.*
-sway: sway
-weston: weston
-cage: cage
-wayfire: wayfire
-gui: lightdm colord seatd greetd gkrellm slim qingy dconf* *gvfs gvfs*
-gui: '*systemd --user*' xdg-* at-spi-*
-
-webbrowser: *chrome-sandbox* *google-chrome* *chromium* *firefox* vivaldi* opera* epiphany chrome*
-webbrowser: lynx elinks w3m w3mmee links
-mua: evolution-* thunderbird* mutt neomutt pine mailx alpine
-
-# -----------------------------------------------------------------------------
-# Kernel / System
+## -----------------------------------------------------------------------------
+## Kernel / System
+## The following are interesting kernel threads and related processes to
+## monitor individually, mainly for their CPU utilization.
+## These kernel threads execute work on behalf of many different tasks, so they
+## should never be attributed to anything specific.
+kernel: kworker/*
+
+## Kernel Samepage Merging (KSM) daemon that looks for identical memory pages
+## across processes and merges them to save memory.
ksmd: ksmd
-khugepaged: khugepaged
+
+## Handles migration of processes between CPU cores to balance load.
+kmigration: migration/*
+
+## Manages memory compaction, moving memory pages around to reduce
+## fragmentation.
+kcompactd: kcompactd*
+
+## Responsible for freeing up memory by swapping pages to disk when needed.
+kswapd: kswapd*
+
+## DAMON is a mechanism designed to efficiently monitor the memory access
+## patterns of running processes or the system itself.
kdamond: kdamond
-kswapd: kswapd
-zswap: zswap
-kcompactd: kcompactd
-system: systemd* udisks* udevd* *udevd ipv6_addrconf dbus-* rtkit*
-system: mdadm acpid uuidd upowerd elogind* eudev mdev lvmpolld dmeventd
-system: accounts-daemon rngd haveged rasdaemon irqbalance start-stop-daemon
-system: supervise-daemon openrc* init runit runsvdir runsv auditd lsmd
-system: abrt* nscd rtkit-daemon gpg-agent usbguard* boltd geoclue
+## Manages memory ballooning in virtualized environments.
+vballoon: vballoon*
-kernel: kworker kthreadd kauditd lockd khelper kdevtmpfs khungtaskd rpciod
-kernel: fsnotify_mark kthrotld deferwq scsi_* kdmflush oom_reaper kdevtempfs
-kernel: ksoftirqd
+## virtio - Handles I/O (storage and network) on virtual machines.
+kvirtio: virtio-* vhost-*
-# -----------------------------------------------------------------------------
-# inetd
+## Layer 4 (transport layer) load balancing
+ipvs: ipvsd ipvs_* ip_vs_*
-inetd: inetd xinetd
+## Hugepages
+## Scans memory regions and tries to promote regular-sized pages (4KB) into
+## hugepages (2MB) where possible, merging smaller contiguous 4KB pages into
+## 2MB pages. Hugepages also use: kswapd, kcompactd, and migration.
+khugepaged: khugepaged
-# -----------------------------------------------------------------------------
-# other application servers
+## Note about zswap:
+## zswap does not introduce its own dedicated kernel threads. Instead, it
+## operates within the existing memory management and swapping framework of the
+## kernel:
+## - kswapd: swaps pages in/out of memory, using compression in the process.
+## - kcompactd: compacts memory when pages are compressed or moved around.
-i2pd: i2pd
+## -----------------------------------------------------------------------------
+## Block Devices
-rethinkdb: rethinkdb
+## Handles deferred block I/O operations for block devices.
+kblockd: kblockd
-beanstalkd: beanstalkd
+## Device Mapper (DM)
+device-mapper: kcopyd/* kcryptd/* kdmflush/* dm_bufio_cache
+device-mapper: raid1/* raid5/* raid10/* multipathd bioset/*
-rspamd: rspamd
+## Software RAID (MD)
+md-raid: md*_raid* md*_resync md*_reshape md*_recovery md_thread
+md-raid: flush_md* raid*_sync
-consul: consul
+## iSCSI
+iscsi: iscsid iscsiadm iscsi_eh/* iscsi_xmit/* iscsi_ttx/* iscsi_rx/* iscsi_trx/*
-kafka: *kafka.Kafka*
+## SCSI
+scsi: scsi_eh/* scsi_tmf/* scsi_wq/*
-rabbitmq: *rabbitmq*
+## BCACHE
+bcache: bcache* bch_btree_io bch_journal
-sidekiq: *sidekiq*
-java: java
-ipfs: ipfs
-erlang: beam.smp
+## SAS
+sas: sas_task/* mpt*
-node: node
-factorio: factorio
+## Fibre Channel (FC)
+fc: fc_transport qla2xxx*
-p4: p4*
+## loop devices
+loop: loop* flush-loop*
-git-services: gitea gitlab-runner
+## -----------------------------------------------------------------------------
+## Filesystems
-freeswitch: freeswitch*
+## Ext4
+ext4: ext4-* jbd2/*
-# -------- web3 / blockchains ----------
+## XFS
+xfs: xfs*
-go-ethereum: geth*
-nethermind-ethereum: nethermind*
-besu-ethereum: besu*
-openEthereum: openethereum*
-urbit: urbit*
-bitcoin-node: *bitcoind* lnd*
-filecoin: lotus* lotus-miner* lotus-worker*
-solana: solana*
-web3: *hardhat* *ganache* *truffle* *brownie* *waffle*
-terra: terra* mantle*
+## BTRFS
+btrfs: btrfs*
-# -----------------------------------------------------------------------------
-# chaos engineering tools
+## NFS
+nfs: rpcbind rpc.* nfs* rpciod
-stress: stress stress-ng*
-gremlin: gremlin*
+## ZFS
+zfs: spl_* z_* txg_* zil_* arc_* l2arc* zfs* zed zdb zpool*
+
+## CEPH
+ceph: ceph-* ceph_* radosgw* rbd-* cephfs-*
+ceph: ceph cephadm osdmaptool crushtool rados rbd
+
+## CIFS & Samba
+cifs: smbd nmbd winbindd ctdbd ctdb-* ctdb_*
+cifs: cifsd cifscreds cifs.upcall
+
+## Apple Filing Protocol (AFP)
+afp: netatalk afpd cnid_dbd cnid_metad
-# -----------------------------------------------------------------------------
-# load testing tools
+## -----------------------------------------------------------------------------
+## Desktops
-locust: locust
+systemd-journald: *systemd-journal*
+systemd: systemd systemd-*
-# -----------------------------------------------------------------------------
-# data science and machine learning tools
+## GNOME
+desktop: gnome-* gsd-* gjs goa-* gcr-* gvfs-* *xdg-*-gnome* passimd gvfsd*
+desktop: at-spi-* at-spi2-* dconf-service gcr-*
-jupyter: jupyter*
+## KDE
+desktop: plasmashell kwin-* kde* *-kde-* klauncher kactivitymanagerd krunner
+desktop: kdeconnectd ksmserver kglobalaccel5 plasma-* *org.kde.*
+desktop: sddm* kwalletd5 knotify5 kmix kscreen kwayland-*
-# -----------------------------------------------------------------------------
-# File synchronization tools
+## XFCE4
+desktop: xfce4-* xfwm4 xfdesktop xfce4-panel xfsettingsd xfconfd
+desktop: lightdm lightdm-*
-filesync: dropbox syncthing
+## Generic tools related to desktop
+desktop: gdm gdm-* dbus-* xdg-* ibus-* evolution-* accounts-daemon colord
+desktop: geoclue pulse* pipewire* wireplumber jack* touchegg pulseaudio
+desktop: Xwayland Xorg
diff --git a/src/collectors/apps.plugin/apps_incremental_collection.c b/src/collectors/apps.plugin/apps_incremental_collection.c
new file mode 100644
index 000000000..e2f0e3ab7
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_incremental_collection.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+#if (INCREMENTAL_DATA_COLLECTION == 1)
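+// managed_log() reports a failed read only once per pid and per PID_LOG flag
+// (tracked in p->log_thrown), and clears the flag when the read succeeds again,
+// so transient collection errors do not flood the log.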
+bool managed_log(struct pid_stat *p, PID_LOG log, bool status) {
+ if(unlikely(!status)) {
+ // netdata_log_error("command failed log %u, errno %d", log, errno);
+
+ if(unlikely(debug_enabled || errno != ENOENT)) {
+ if(unlikely(debug_enabled || !(p->log_thrown & log))) {
+ p->log_thrown |= log;
+ switch(log) {
+ case PID_LOG_IO:
+#if !defined(OS_LINUX)
+ netdata_log_error("Cannot fetch process %d I/O info (command '%s')", p->pid, pid_stat_comm(p));
+#else
+ netdata_log_error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p));
+#endif
+ break;
+
+ case PID_LOG_STATUS:
+#if !defined(OS_LINUX)
+ netdata_log_error("Cannot fetch process %d status info (command '%s')", p->pid, pid_stat_comm(p));
+#else
+ netdata_log_error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p));
+#endif
+ break;
+
+ case PID_LOG_CMDLINE:
+#if !defined(OS_LINUX)
+ netdata_log_error("Cannot fetch process %d command line (command '%s')", p->pid, pid_stat_comm(p));
+#else
+ netdata_log_error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p));
+#endif
+ break;
+
+ case PID_LOG_FDS:
+#if !defined(OS_LINUX)
+ netdata_log_error("Cannot fetch process %d files (command '%s')", p->pid, pid_stat_comm(p));
+#else
+ netdata_log_error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p));
+#endif
+ break;
+
+ case PID_LOG_LIMITS:
+#if !defined(OS_LINUX)
+ ;
+#else
+ netdata_log_error("Cannot process %s/proc/%d/limits (command '%s')", netdata_configured_host_prefix, p->pid, pid_stat_comm(p));
+#endif
+
+ case PID_LOG_STAT:
+ break;
+
+ default:
+ netdata_log_error("unhandled error for pid %d, command '%s'", p->pid, pid_stat_comm(p));
+ break;
+ }
+ }
+ }
+ errno_clear();
+ }
+ else if(unlikely(p->log_thrown & log)) {
+ // netdata_log_error("unsetting log %u on pid %d", log, p->pid);
+ p->log_thrown &= ~log;
+ }
+
+ return status;
+}
+
+static inline bool incrementally_read_pid_stat(struct pid_stat *p, void *ptr) {
+ p->last_stat_collected_usec = p->stat_collected_usec;
+ p->stat_collected_usec = now_monotonic_usec();
+ calls_counter++;
+
+ if(!OS_FUNCTION(apps_os_read_pid_stat)(p, ptr))
+ return 0;
+
+ return 1;
+}
+
+static inline int incrementally_read_pid_io(struct pid_stat *p, void *ptr) {
+ p->last_io_collected_usec = p->io_collected_usec;
+ p->io_collected_usec = now_monotonic_usec();
+ calls_counter++;
+
+ bool ret = OS_FUNCTION(apps_os_read_pid_io)(p, ptr);
+
+ return ret ? 1 : 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
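+// Collect everything available for a single pid: /proc/<pid>/stat and status
+// are mandatory (collection fails without them), while io, fd and limits are optional.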
+int incrementally_collect_data_for_pid_stat(struct pid_stat *p, void *ptr) {
+ if(unlikely(p->read)) return 0;
+
+ pid_collection_started(p);
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/stat
+
+ if(unlikely(!managed_log(p, PID_LOG_STAT, incrementally_read_pid_stat(p, ptr)))) {
+ // there is no reason to proceed if we cannot get its status
+ pid_collection_failed(p);
+ return 0;
+ }
+
+ // check its parent pid
+ if(unlikely(p->ppid < INIT_PID))
+ p->ppid = 0;
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/io
+
+ managed_log(p, PID_LOG_IO, incrementally_read_pid_io(p, ptr));
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/status
+
+ if(unlikely(!managed_log(p, PID_LOG_STATUS, OS_FUNCTION(apps_os_read_pid_status)(p, ptr)))) {
+ // there is no reason to proceed if we cannot get its status
+ pid_collection_failed(p);
+ return 0;
+ }
+
+ // --------------------------------------------------------------------
+ // /proc/<pid>/fd
+
+#if (PROCESSES_HAVE_FDS == 1)
+ if(enable_file_charts) {
+ managed_log(p, PID_LOG_FDS, read_pid_file_descriptors(p, ptr));
+#if (PROCESSES_HAVE_PID_LIMITS == 1)
+ managed_log(p, PID_LOG_LIMITS, OS_FUNCTION(apps_os_read_pid_limits)(p, ptr));
+#endif
+ }
+#endif
+
+ // --------------------------------------------------------------------
+ // done!
+
+#if defined(NETDATA_INTERNAL_CHECKS) && (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ struct pid_stat *pp = p->parent;
+ if(unlikely(include_exited_childs && pp && !pp->read))
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING,
+ "Read process %d (%s) sortlisted %"PRIu32", but its parent %d (%s) sortlisted %"PRIu32", is not read",
+ p->pid, pid_stat_comm(p), p->sortlist, pp->pid, pid_stat_comm(pp), pp->sortlist);
+#endif
+
+ pid_collection_completed(p);
+
+ return 1;
+}
+
+int incrementally_collect_data_for_pid(pid_t pid, void *ptr) {
+ if(unlikely(pid < INIT_PID)) {
+ netdata_log_error("Invalid pid %d read (expected >= %d). Ignoring process.", pid, INIT_PID);
+ return 0;
+ }
+
+ struct pid_stat *p = get_or_allocate_pid_entry(pid);
+ if(unlikely(!p)) return 0;
+
+ return incrementally_collect_data_for_pid_stat(p, ptr);
+}
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+
+#if (PROCESSES_HAVE_CMDLINE == 1)
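+// Read the full command line of a pid via the per-OS helper and store it;
+// on failure, any previously stored command line is dropped.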
+int read_proc_pid_cmdline(struct pid_stat *p) {
+ static char cmdline[MAX_CMDLINE];
+
+ if(unlikely(!OS_FUNCTION(apps_os_get_pid_cmdline)(p, cmdline, sizeof(cmdline))))
+ goto cleanup;
+
+ update_pid_cmdline(p, cmdline);
+
+ return 1;
+
+cleanup:
+ // could not read the command line - clear any previously stored value
+ string_freez(p->cmdline);
+ p->cmdline = NULL;
+ return 0;
+}
+#endif
diff --git a/src/collectors/apps.plugin/apps_os_freebsd.c b/src/collectors/apps.plugin/apps_os_freebsd.c
new file mode 100644
index 000000000..1877410d6
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_os_freebsd.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+#if defined(OS_FREEBSD)
+
+usec_t system_current_time_ut;
+long global_block_size = 512;
+
+static long get_fs_block_size(void) {
+ struct statvfs vfs;
+ static long block_size = 0;
+
+ if (block_size == 0) {
+ if (statvfs("/", &vfs) == 0) {
+ block_size = vfs.f_frsize ? vfs.f_frsize : vfs.f_bsize;
+ } else {
+ // If statvfs fails, fall back to the typical block size
+ block_size = 512;
+ }
+ }
+
+ return block_size;
+}
+
+void apps_os_init_freebsd(void) {
+ global_block_size = get_fs_block_size();
+}
+
+static inline void get_current_time(void) {
+ struct timeval current_time;
+ gettimeofday(&current_time, NULL);
+ system_current_time_ut = timeval_usec(&current_time);
+}
+
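+// Total physical memory, queried via the hw.physmem sysctl.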
+uint64_t apps_os_get_total_memory_freebsd(void) {
+ uint64_t ret = 0;
+
+ int mib[2] = {CTL_HW, HW_PHYSMEM};
+ size_t size = sizeof(ret);
+ if (sysctl(mib, 2, &ret, &size, NULL, 0) == -1) {
+ netdata_log_error("Failed to get total memory using sysctl");
+ return 0;
+ }
+
+ return ret;
+}
+
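+// Enumerate the open file descriptors of a pid with the KERN_PROC_FILEDESC
+// sysctl and map each one to a shared file descriptor entry by name.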
+bool apps_os_read_pid_fds_freebsd(struct pid_stat *p, void *ptr) {
+ int mib[4];
+ size_t size;
+ struct kinfo_file *fds;
+ static char *fdsbuf;
+ char *bfdsbuf, *efdsbuf;
+ char fdsname[FILENAME_MAX + 1];
+#define SHM_FORMAT_LEN 31 // format: 21 + size: 10
+ char shm_name[FILENAME_MAX - SHM_FORMAT_LEN + 1];
+
+ // we make all pid fds negative, so that
+ // we can detect unused file descriptors
+ // at the end, to free them
+ make_all_pid_fds_negative(p);
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_FILEDESC;
+ mib[3] = p->pid;
+
+ if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) {
+ netdata_log_error("sysctl error: Can't get file descriptors data size for pid %d", p->pid);
+ return false;
+ }
+ if (likely(size > 0))
+ fdsbuf = reallocz(fdsbuf, size);
+ if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) {
+ netdata_log_error("sysctl error: Can't get file descriptors data for pid %d", p->pid);
+ return false;
+ }
+
+ bfdsbuf = fdsbuf;
+ efdsbuf = fdsbuf + size;
+ while (bfdsbuf < efdsbuf) {
+ fds = (struct kinfo_file *)(uintptr_t)bfdsbuf;
+ if (unlikely(fds->kf_structsize == 0))
+ break;
+
+ // do not process file descriptors for current working directory, root directory,
+ // jail directory, ktrace vnode, text vnode and controlling terminal
+ if (unlikely(fds->kf_fd < 0)) {
+ bfdsbuf += fds->kf_structsize;
+ continue;
+ }
+
+ // get file descriptors array index
+ size_t fdid = fds->kf_fd;
+
+ // check if the fds array is small
+ if (unlikely(fdid >= p->fds_size)) {
+ // it is small, extend it
+
+ uint32_t new_size = fds_new_size(p->fds_size, fdid);
+
+ debug_log("extending fd memory slots for %s from %u to %u",
+ pid_stat_comm(p), p->fds_size, new_size);
+
+ p->fds = reallocz(p->fds, new_size * sizeof(struct pid_fd));
+
+ // and initialize it
+ init_pid_fds(p, p->fds_size, new_size - p->fds_size);
+ p->fds_size = new_size;
+ }
+
+ if (unlikely(p->fds[fdid].fd == 0)) {
+ // we don't know this fd, get it
+
+ switch (fds->kf_type) {
+ case KF_TYPE_FIFO:
+ case KF_TYPE_VNODE:
+ if (unlikely(!fds->kf_path[0])) {
+ sprintf(fdsname, "other: inode: %lu", fds->kf_un.kf_file.kf_file_fileid);
+ break;
+ }
+ sprintf(fdsname, "%s", fds->kf_path);
+ break;
+ case KF_TYPE_SOCKET:
+ switch (fds->kf_sock_domain) {
+ case AF_INET:
+ case AF_INET6:
+#if __FreeBSD_version < 1400074
+ if (fds->kf_sock_protocol == IPPROTO_TCP)
+ sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_inpcb);
+ else
+#endif
+ sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_pcb);
+ break;
+ case AF_UNIX:
+ /* print address of pcb and connected pcb */
+ sprintf(fdsname, "socket: %lx %lx", fds->kf_un.kf_sock.kf_sock_pcb, fds->kf_un.kf_sock.kf_sock_unpconn);
+ break;
+ default:
+ /* print protocol number and socket address */
+#if __FreeBSD_version < 1200031
+ sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_sa_local.__ss_pad1, fds->kf_sa_local.__ss_pad2);
+#else
+ sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sa_local.__ss_pad1, fds->kf_un.kf_sock.kf_sa_local.__ss_pad2);
+#endif
+ }
+ break;
+ case KF_TYPE_PIPE:
+ sprintf(fdsname, "pipe: %lu %lu", fds->kf_un.kf_pipe.kf_pipe_addr, fds->kf_un.kf_pipe.kf_pipe_peer);
+ break;
+ case KF_TYPE_PTS:
+#if __FreeBSD_version < 1200031
+ sprintf(fdsname, "other: pts: %u", fds->kf_un.kf_pts.kf_pts_dev);
+#else
+ sprintf(fdsname, "other: pts: %lu", fds->kf_un.kf_pts.kf_pts_dev);
+#endif
+ break;
+ case KF_TYPE_SHM:
+ strncpyz(shm_name, fds->kf_path, FILENAME_MAX - SHM_FORMAT_LEN);
+ sprintf(fdsname, "other: shm: %s size: %lu", shm_name, fds->kf_un.kf_file.kf_file_size);
+ break;
+ case KF_TYPE_SEM:
+ sprintf(fdsname, "other: sem: %u", fds->kf_un.kf_sem.kf_sem_value);
+ break;
+ default:
+ sprintf(fdsname, "other: pid: %d fd: %d", fds->kf_un.kf_proc.kf_pid, fds->kf_fd);
+ }
+
+ // if another process already has this, we will get
+ // the same id
+ p->fds[fdid].fd = file_descriptor_find_or_add(fdsname, 0);
+ }
+
+ // else make it positive again, we need it
+ // of course, the actual file may have changed
+
+ else
+ p->fds[fdid].fd = -p->fds[fdid].fd;
+
+ bfdsbuf += fds->kf_structsize;
+ }
+
+ return true;
+}
+
+bool apps_os_get_pid_cmdline_freebsd(struct pid_stat *p, char *cmdline, size_t bytes) {
+ size_t i, b = bytes - 1;
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_ARGS;
+ mib[3] = p->pid;
+ if (unlikely(sysctl(mib, 4, cmdline, &b, NULL, 0)))
+ return false;
+
+ cmdline[b] = '\0';
+ for(i = 0; i < b ; i++)
+ if(unlikely(!cmdline[i])) cmdline[i] = ' ';
+
+ return true;
+}
+
+bool apps_os_read_pid_io_freebsd(struct pid_stat *p, void *ptr) {
+ struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
+
+ pid_incremental_rate(io, PDF_LREAD, proc_info->ki_rusage.ru_inblock * global_block_size);
+ pid_incremental_rate(io, PDF_LWRITE, proc_info->ki_rusage.ru_oublock * global_block_size);
+
+ return true;
+}
+
+bool apps_os_read_pid_limits_freebsd(struct pid_stat *p __maybe_unused, void *ptr __maybe_unused) {
+ return false;
+}
+
+bool apps_os_read_pid_status_freebsd(struct pid_stat *p, void *ptr) {
+ struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
+
+ p->uid = proc_info->ki_uid;
+ p->gid = proc_info->ki_groups[0];
+ p->values[PDF_VMSIZE] = proc_info->ki_size;
+ p->values[PDF_VMRSS] = proc_info->ki_rssize * pagesize;
+ // TODO: what about shared and swap memory on FreeBSD?
+ return true;
+}
+
+//bool apps_os_read_global_cpu_utilization_freebsd(void) {
+// static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0;
+// static usec_t collected_usec = 0, last_collected_usec = 0;
+// long cp_time[CPUSTATES];
+//
+// if (unlikely(CPUSTATES != 5)) {
+// goto cleanup;
+// } else {
+// static int mib[2] = {0, 0};
+//
+// if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) {
+// goto cleanup;
+// }
+// }
+//
+// last_collected_usec = collected_usec;
+// collected_usec = now_monotonic_usec();
+//
+// calls_counter++;
+//
+// // temporary - it is added global_ntime;
+// kernel_uint_t global_ntime = 0;
+//
+// incremental_rate(global_utime, utime_raw, cp_time[0], collected_usec, last_collected_usec, (NSEC_PER_SEC / system_hz));
+// incremental_rate(global_ntime, ntime_raw, cp_time[1], collected_usec, last_collected_usec, (NSEC_PER_SEC / system_hz));
+// incremental_rate(global_stime, stime_raw, cp_time[2], collected_usec, last_collected_usec, (NSEC_PER_SEC / system_hz));
+//
+// global_utime += global_ntime;
+//
+// if(unlikely(global_iterations_counter == 1)) {
+// global_utime = 0;
+// global_stime = 0;
+// global_gtime = 0;
+// }
+//
+// return 1;
+//
+//cleanup:
+// global_utime = 0;
+// global_stime = 0;
+// global_gtime = 0;
+// return 0;
+//}
+
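+// Read the per-pid counters from the kinfo_proc entry: page faults and CPU
+// times become incremental rates; thread count and uptime are absolute values.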
+bool apps_os_read_pid_stat_freebsd(struct pid_stat *p, void *ptr) {
+ struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
+ if (unlikely(proc_info->ki_tdflags & TDF_IDLETD))
+ goto cleanup;
+
+ char *comm = proc_info->ki_comm;
+ p->ppid = proc_info->ki_ppid;
+
+ update_pid_comm(p, comm);
+
+ pid_incremental_rate(stat, PDF_MINFLT, (kernel_uint_t)proc_info->ki_rusage.ru_minflt);
+ pid_incremental_rate(stat, PDF_CMINFLT, (kernel_uint_t)proc_info->ki_rusage_ch.ru_minflt);
+ pid_incremental_rate(stat, PDF_MAJFLT, (kernel_uint_t)proc_info->ki_rusage.ru_majflt);
+ pid_incremental_rate(stat, PDF_CMAJFLT, (kernel_uint_t)proc_info->ki_rusage_ch.ru_majflt);
+ pid_incremental_cpu(stat, PDF_UTIME, (kernel_uint_t)proc_info->ki_rusage.ru_utime.tv_sec * NSEC_PER_SEC + proc_info->ki_rusage.ru_utime.tv_usec * NSEC_PER_USEC);
+ pid_incremental_cpu(stat, PDF_STIME, (kernel_uint_t)proc_info->ki_rusage.ru_stime.tv_sec * NSEC_PER_SEC + proc_info->ki_rusage.ru_stime.tv_usec * NSEC_PER_USEC);
+ pid_incremental_cpu(stat, PDF_CUTIME, (kernel_uint_t)proc_info->ki_rusage_ch.ru_utime.tv_sec * NSEC_PER_SEC + proc_info->ki_rusage_ch.ru_utime.tv_usec * NSEC_PER_USEC);
+ pid_incremental_cpu(stat, PDF_CSTIME, (kernel_uint_t)proc_info->ki_rusage_ch.ru_stime.tv_sec * NSEC_PER_SEC + proc_info->ki_rusage_ch.ru_stime.tv_usec * NSEC_PER_USEC);
+
+ p->values[PDF_THREADS] = proc_info->ki_numthreads;
+
+ usec_t started_ut = timeval_usec(&proc_info->ki_start);
+ p->values[PDF_UPTIME] = (system_current_time_ut > started_ut) ? (system_current_time_ut - started_ut) / USEC_PER_SEC : 0;
+
+ if(unlikely(debug_enabled))
+ debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d",
+ netdata_configured_host_prefix, p->pid, pid_stat_comm(p), (p->target)?string2str(p->target->name):"UNSET",
+ p->stat_collected_usec - p->last_stat_collected_usec,
+ p->values[PDF_UTIME],
+ p->values[PDF_STIME],
+ p->values[PDF_CUTIME],
+ p->values[PDF_CSTIME],
+ p->values[PDF_MINFLT],
+ p->values[PDF_MAJFLT],
+ p->values[PDF_CMINFLT],
+ p->values[PDF_CMAJFLT],
+ p->values[PDF_THREADS]);
+
+ return true;
+
+cleanup:
+ return false;
+}
+
+bool apps_os_collect_all_pids_freebsd(void) {
+ // Mark all processes as unread before collecting new data
+ struct pid_stat *p = NULL;
+ int i, procnum;
+
+ static size_t procbase_size = 0;
+ static struct kinfo_proc *procbase = NULL;
+
+ size_t new_procbase_size;
+
+ int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC };
+ if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) {
+ netdata_log_error("sysctl error: Can't get processes data size");
+ return false;
+ }
+
+    // leave some extra room for processes that may be
+    // started in the meantime.
+ new_procbase_size += 100 * sizeof(struct kinfo_proc);
+
+ // increase the buffer if needed
+ if(new_procbase_size > procbase_size) {
+ procbase_size = new_procbase_size;
+ procbase = reallocz(procbase, procbase_size);
+ }
+
+    // sysctl() reads the available buffer size from new_procbase_size
+    // and writes back to it the amount of data actually filled in
+ new_procbase_size = procbase_size;
+
+ // get the processes from the system
+ if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) {
+ netdata_log_error("sysctl error: Can't get processes data");
+ return false;
+ }
+
+ // based on the amount of data filled in
+ // calculate the number of processes we got
+ procnum = new_procbase_size / sizeof(struct kinfo_proc);
+
+ get_current_time();
+
+ for (i = 0 ; i < procnum ; ++i) {
+ pid_t pid = procbase[i].ki_pid;
+ if (pid <= 0) continue;
+ incrementally_collect_data_for_pid(pid, &procbase[i]);
+ }
+
+ return true;
+}
+
+#endif
diff --git a/src/collectors/apps.plugin/apps_os_linux.c b/src/collectors/apps.plugin/apps_os_linux.c
new file mode 100644
index 000000000..824addfd6
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_os_linux.c
@@ -0,0 +1,770 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+#if defined(OS_LINUX)
+
+#define MAX_PROC_PID_LIMITS 8192
+#define PROC_PID_LIMITS_MAX_OPEN_FILES_KEY "\nMax open files "
+
+int max_fds_cache_seconds = 60;
+kernel_uint_t system_uptime_secs;
+
+void apps_os_init_linux(void) {
+ ;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// /proc/pid/fd
+
+struct arl_callback_ptr {
+ struct pid_stat *p;
+ procfile *ff;
+ size_t line;
+};
+
+bool apps_os_read_pid_fds_linux(struct pid_stat *p, void *ptr __maybe_unused) {
+ if(unlikely(!p->fds_dirname)) {
+ char dirname[FILENAME_MAX+1];
+ snprintfz(dirname, FILENAME_MAX, "%s/proc/%d/fd", netdata_configured_host_prefix, p->pid);
+ p->fds_dirname = strdupz(dirname);
+ }
+
+ DIR *fds = opendir(p->fds_dirname);
+ if(unlikely(!fds)) return false;
+
+ struct dirent *de;
+ char linkname[FILENAME_MAX + 1];
+
+ // we make all pid fds negative, so that
+ // we can detect unused file descriptors
+ // at the end, to free them
+ make_all_pid_fds_negative(p);
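+    // while scanning below, every fd that is still present is flipped back to positive;
+    // entries that remain negative were not found again and can be released as stale later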
+
+ while((de = readdir(fds))) {
+ // we need only files with numeric names
+
+ if(unlikely(de->d_name[0] < '0' || de->d_name[0] > '9'))
+ continue;
+
+ // get its number
+ int fdid = (int) str2l(de->d_name);
+ if(unlikely(fdid < 0)) continue;
+
+ // check if the fds array is small
+ if(unlikely((size_t)fdid >= p->fds_size)) {
+ // it is small, extend it
+
+ uint32_t new_size = fds_new_size(p->fds_size, fdid);
+
+ debug_log("extending fd memory slots for %s from %u to %u",
+ pid_stat_comm(p), p->fds_size, new_size);
+
+ p->fds = reallocz(p->fds, new_size * sizeof(struct pid_fd));
+
+ // and initialize it
+ init_pid_fds(p, p->fds_size, new_size - p->fds_size);
+ p->fds_size = new_size;
+ }
+
+ if(unlikely(p->fds[fdid].fd < 0 && de->d_ino != p->fds[fdid].inode)) {
+ // inodes do not match, clear the previous entry
+ inodes_changed_counter++;
+ file_descriptor_not_used(-p->fds[fdid].fd);
+ clear_pid_fd(&p->fds[fdid]);
+ }
+
+ if(p->fds[fdid].fd < 0 && p->fds[fdid].cache_iterations_counter > 0) {
+ p->fds[fdid].fd = -p->fds[fdid].fd;
+ p->fds[fdid].cache_iterations_counter--;
+ continue;
+ }
+
+ if(unlikely(!p->fds[fdid].filename)) {
+ filenames_allocated_counter++;
+ char fdname[FILENAME_MAX + 1];
+ snprintfz(fdname, FILENAME_MAX, "%s/proc/%d/fd/%s", netdata_configured_host_prefix, p->pid, de->d_name);
+ p->fds[fdid].filename = strdupz(fdname);
+ }
+
+ file_counter++;
+ ssize_t l = readlink(p->fds[fdid].filename, linkname, FILENAME_MAX);
+ if(unlikely(l == -1)) {
+ // cannot read the link
+
+ if(debug_enabled)
+ netdata_log_error("Cannot read link %s", p->fds[fdid].filename);
+
+ if(unlikely(p->fds[fdid].fd < 0)) {
+ file_descriptor_not_used(-p->fds[fdid].fd);
+ clear_pid_fd(&p->fds[fdid]);
+ }
+
+ continue;
+ }
+ else
+ linkname[l] = '\0';
+
+ uint32_t link_hash = simple_hash(linkname);
+
+ if(unlikely(p->fds[fdid].fd < 0 && p->fds[fdid].link_hash != link_hash)) {
+ // the link changed
+ links_changed_counter++;
+ file_descriptor_not_used(-p->fds[fdid].fd);
+ clear_pid_fd(&p->fds[fdid]);
+ }
+
+ if(unlikely(p->fds[fdid].fd == 0)) {
+ // we don't know this fd, get it
+
+ // if another process already has this, we will get
+ // the same id
+ p->fds[fdid].fd = (int)file_descriptor_find_or_add(linkname, link_hash);
+ p->fds[fdid].inode = de->d_ino;
+ p->fds[fdid].link_hash = link_hash;
+ }
+ else {
+ // else make it positive again, we need it
+ p->fds[fdid].fd = -p->fds[fdid].fd;
+ }
+
+ // caching control
+ // without this we read all the files on every iteration
+ if(max_fds_cache_seconds > 0) {
+ size_t spread = ((size_t)max_fds_cache_seconds > 10) ? 10 : (size_t)max_fds_cache_seconds;
+
+ // cache it for a few iterations
+ size_t max = ((size_t) max_fds_cache_seconds + (fdid % spread)) / (size_t) update_every;
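+            // max is the refresh period in iterations; the (fdid % spread) offset staggers
+            // refreshes so that not all fds are re-read on the same iteration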
+ p->fds[fdid].cache_iterations_reset++;
+
+ if(unlikely(p->fds[fdid].cache_iterations_reset % spread == (size_t) fdid % spread))
+ p->fds[fdid].cache_iterations_reset++;
+
+ if(unlikely((fdid <= 2 && p->fds[fdid].cache_iterations_reset > 5) ||
+ p->fds[fdid].cache_iterations_reset > max)) {
+                // for stdin, stdout, stderr (fdid <= 2) we have already checked a few times, or the counter went above the max; cap it at max
+ p->fds[fdid].cache_iterations_reset = max;
+ }
+
+ p->fds[fdid].cache_iterations_counter = p->fds[fdid].cache_iterations_reset;
+ }
+ }
+
+ closedir(fds);
+
+ return true;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// /proc/meminfo
+
+uint64_t apps_os_get_total_memory_linux(void) {
+ uint64_t ret = 0;
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/meminfo", netdata_configured_host_prefix);
+
+ procfile *ff = procfile_open(filename, ": \t", PROCFILE_FLAG_DEFAULT);
+ if(!ff)
+ return ret;
+
+ ff = procfile_readall(ff);
+ if(!ff)
+ return ret;
+
+ size_t line, lines = procfile_lines(ff);
+
+ for(line = 0; line < lines ;line++) {
+ size_t words = procfile_linewords(ff, line);
+ if(words == 3 && strcmp(procfile_lineword(ff, line, 0), "MemTotal") == 0 && strcmp(procfile_lineword(ff, line, 2), "kB") == 0) {
+ ret = str2ull(procfile_lineword(ff, line, 1), NULL) * 1024;
+ break;
+ }
+ }
+
+ procfile_close(ff);
+
+ return ret;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// /proc/pid/cmdline
+
+bool apps_os_get_pid_cmdline_linux(struct pid_stat *p, char *cmdline, size_t bytes) {
+ if(unlikely(!p->cmdline_filename)) {
+ char filename[FILENAME_MAX];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
+ p->cmdline_filename = strdupz(filename);
+ }
+
+ int fd = open(p->cmdline_filename, procfile_open_flags, 0666);
+ if(unlikely(fd == -1))
+ return false;
+
+ ssize_t i, b = read(fd, cmdline, bytes - 1);
+ close(fd);
+
+ if(unlikely(b < 0))
+ return false;
+
+ cmdline[b] = '\0';
+ for(i = 0; i < b ; i++)
+ if(unlikely(!cmdline[i])) cmdline[i] = ' ';
+
+ // remove trailing spaces
+ while(b > 0 && cmdline[b - 1] == ' ')
+ cmdline[--b] = '\0';
+
+ return true;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// /proc/pid/io
+
+bool apps_os_read_pid_io_linux(struct pid_stat *p, void *ptr __maybe_unused) {
+ static procfile *ff = NULL;
+
+ if(unlikely(!p->io_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/io", netdata_configured_host_prefix, p->pid);
+ p->io_filename = strdupz(filename);
+ }
+
+ // open the file
+ ff = procfile_reopen(ff, p->io_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(unlikely(!ff)) goto cleanup;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) goto cleanup;
+
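+    // /proc/pid/io fields, in file order: rchar/wchar (logical reads/writes),
+    // syscr/syscw (read/write syscalls), read_bytes/write_bytes (physical I/O)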
+ pid_incremental_rate(io, PDF_LREAD, str2kernel_uint_t(procfile_lineword(ff, 0, 1)));
+ pid_incremental_rate(io, PDF_LWRITE, str2kernel_uint_t(procfile_lineword(ff, 1, 1)));
+ pid_incremental_rate(io, PDF_OREAD, str2kernel_uint_t(procfile_lineword(ff, 2, 1)));
+ pid_incremental_rate(io, PDF_OWRITE, str2kernel_uint_t(procfile_lineword(ff, 3, 1)));
+ pid_incremental_rate(io, PDF_PREAD, str2kernel_uint_t(procfile_lineword(ff, 4, 1)));
+ pid_incremental_rate(io, PDF_PWRITE, str2kernel_uint_t(procfile_lineword(ff, 5, 1)));
+
+ return true;
+
+cleanup:
+ return false;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// /proc/pid/limits
+
+static inline kernel_uint_t get_proc_pid_limits_limit(char *buf, const char *key, size_t key_len, kernel_uint_t def) {
+ char *line = strstr(buf, key);
+ if(!line)
+ return def;
+
+ char *v = &line[key_len];
+ while(isspace((uint8_t)*v)) v++;
+
+ if(strcmp(v, "unlimited") == 0)
+ return 0;
+
+ return str2ull(v, NULL);
+}
+
+bool apps_os_read_pid_limits_linux(struct pid_stat *p, void *ptr __maybe_unused) {
+ static char proc_pid_limits_buffer[MAX_PROC_PID_LIMITS + 1];
+ bool ret = false;
+ bool read_limits = false;
+
+ errno_clear();
+ proc_pid_limits_buffer[0] = '\0';
+
+ kernel_uint_t all_fds = pid_openfds_sum(p);
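+    // throttle: re-read the limits at most once per minute, unless the open fds
+    // have grown beyond half of the last known limit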
+ if(all_fds < p->limits.max_open_files / 2 && p->io_collected_usec > p->last_limits_collected_usec && p->io_collected_usec - p->last_limits_collected_usec <= 60 * USEC_PER_SEC) {
+ // too frequent, we want to collect limits once per minute
+ ret = true;
+ goto cleanup;
+ }
+
+ if(unlikely(!p->limits_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/limits", netdata_configured_host_prefix, p->pid);
+ p->limits_filename = strdupz(filename);
+ }
+
+ int fd = open(p->limits_filename, procfile_open_flags, 0666);
+ if(unlikely(fd == -1)) goto cleanup;
+
+ ssize_t bytes = read(fd, proc_pid_limits_buffer, MAX_PROC_PID_LIMITS);
+ close(fd);
+
+ if(bytes <= 0)
+ goto cleanup;
+
+ // make it '\0' terminated
+ if(bytes < MAX_PROC_PID_LIMITS)
+ proc_pid_limits_buffer[bytes] = '\0';
+ else
+ proc_pid_limits_buffer[MAX_PROC_PID_LIMITS - 1] = '\0';
+
+ p->limits.max_open_files = get_proc_pid_limits_limit(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY, sizeof(PROC_PID_LIMITS_MAX_OPEN_FILES_KEY) - 1, 0);
+ if(p->limits.max_open_files == 1) {
+        // this looks like a kernel bug or something similar:
+        // it reports max open files as 1, while the process
+        // actually has more than one file open...
+ // https://github.com/netdata/netdata/issues/15443
+ p->limits.max_open_files = 0;
+ ret = true;
+ goto cleanup;
+ }
+
+ p->last_limits_collected_usec = p->io_collected_usec;
+ read_limits = true;
+
+ ret = true;
+
+cleanup:
+ if(p->limits.max_open_files)
+ p->openfds_limits_percent = (NETDATA_DOUBLE)all_fds * 100.0 / (NETDATA_DOUBLE)p->limits.max_open_files;
+ else
+ p->openfds_limits_percent = 0.0;
+
+ if(p->openfds_limits_percent > 100.0) {
+ if(!(p->log_thrown & PID_LOG_LIMITS_DETAIL)) {
+ char *line;
+
+ if(!read_limits) {
+ proc_pid_limits_buffer[0] = '\0';
+ line = "NOT READ";
+ }
+ else {
+ line = strstr(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY);
+ if (line) {
+ line++; // skip the initial newline
+
+ char *end = strchr(line, '\n');
+ if (end)
+ *end = '\0';
+ }
+ }
+
+ netdata_log_info(
+ "FDS_LIMITS: PID %d (%s) is using "
+ "%0.2f %% of its fds limits, "
+ "open fds = %"PRIu64 "("
+ "files = %"PRIu64 ", "
+ "pipes = %"PRIu64 ", "
+ "sockets = %"PRIu64", "
+ "inotifies = %"PRIu64", "
+ "eventfds = %"PRIu64", "
+ "timerfds = %"PRIu64", "
+ "signalfds = %"PRIu64", "
+ "eventpolls = %"PRIu64" "
+ "other = %"PRIu64" "
+ "), open fds limit = %"PRIu64", "
+ "%s, "
+ "original line [%s]",
+ p->pid, pid_stat_comm(p), p->openfds_limits_percent, all_fds,
+ p->openfds.files,
+ p->openfds.pipes,
+ p->openfds.sockets,
+ p->openfds.inotifies,
+ p->openfds.eventfds,
+ p->openfds.timerfds,
+ p->openfds.signalfds,
+ p->openfds.eventpolls,
+ p->openfds.other,
+ p->limits.max_open_files,
+ read_limits ? "and we have read the limits AFTER counting the fds"
+ : "but we have read the limits BEFORE counting the fds",
+ line);
+
+ p->log_thrown |= PID_LOG_LIMITS_DETAIL;
+ }
+ }
+ else
+ p->log_thrown &= ~PID_LOG_LIMITS_DETAIL;
+
+ return ret;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// /proc/pid/status
+
+void arl_callback_status_uid(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
+
+ //const char *real_uid = procfile_lineword(aptr->ff, aptr->line, 1);
+ const char *effective_uid = procfile_lineword(aptr->ff, aptr->line, 2);
+ //const char *saved_uid = procfile_lineword(aptr->ff, aptr->line, 3);
+ //const char *filesystem_uid = procfile_lineword(aptr->ff, aptr->line, 4);
+
+ if(likely(effective_uid && *effective_uid))
+ aptr->p->uid = (uid_t)str2l(effective_uid);
+}
+
+void arl_callback_status_gid(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
+
+ //const char *real_gid = procfile_lineword(aptr->ff, aptr->line, 1);
+ const char *effective_gid = procfile_lineword(aptr->ff, aptr->line, 2);
+ //const char *saved_gid = procfile_lineword(aptr->ff, aptr->line, 3);
+ //const char *filesystem_gid = procfile_lineword(aptr->ff, aptr->line, 4);
+
+ if(likely(effective_gid && *effective_gid))
+        aptr->p->gid = (gid_t)str2l(effective_gid);
+}
+
+void arl_callback_status_vmsize(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->values[PDF_VMSIZE] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024;
+}
+
+void arl_callback_status_vmswap(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->values[PDF_VMSWAP] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024;
+}
+
+void arl_callback_status_vmrss(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->values[PDF_VMRSS] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024;
+}
+
+void arl_callback_status_rssfile(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->values[PDF_RSSFILE] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024;
+}
+
+void arl_callback_status_rssshmem(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
+
+ aptr->p->values[PDF_RSSSHMEM] = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)) * 1024;
+}
+
+void arl_callback_status_voluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return;
+
+ struct pid_stat *p = aptr->p;
+ pid_incremental_rate(stat, PDF_VOLCTX, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)));
+}
+
+void arl_callback_status_nonvoluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) {
+ (void)name; (void)hash; (void)value;
+ struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
+ if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return;
+
+ struct pid_stat *p = aptr->p;
+ pid_incremental_rate(stat, PDF_NVOLCTX, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)));
+}
+
+bool apps_os_read_pid_status_linux(struct pid_stat *p, void *ptr __maybe_unused) {
+ static struct arl_callback_ptr arl_ptr;
+ static procfile *ff = NULL;
+
+ if(unlikely(!p->status_arl)) {
+ p->status_arl = arl_create("/proc/pid/status", NULL, 60);
+ arl_expect_custom(p->status_arl, "Uid", arl_callback_status_uid, &arl_ptr);
+ arl_expect_custom(p->status_arl, "Gid", arl_callback_status_gid, &arl_ptr);
+ arl_expect_custom(p->status_arl, "VmSize", arl_callback_status_vmsize, &arl_ptr);
+ arl_expect_custom(p->status_arl, "VmRSS", arl_callback_status_vmrss, &arl_ptr);
+ arl_expect_custom(p->status_arl, "RssFile", arl_callback_status_rssfile, &arl_ptr);
+ arl_expect_custom(p->status_arl, "RssShmem", arl_callback_status_rssshmem, &arl_ptr);
+ arl_expect_custom(p->status_arl, "VmSwap", arl_callback_status_vmswap, &arl_ptr);
+ arl_expect_custom(p->status_arl, "voluntary_ctxt_switches", arl_callback_status_voluntary_ctxt_switches, &arl_ptr);
+ arl_expect_custom(p->status_arl, "nonvoluntary_ctxt_switches", arl_callback_status_nonvoluntary_ctxt_switches, &arl_ptr);
+ }
+
+ if(unlikely(!p->status_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/status", netdata_configured_host_prefix, p->pid);
+ p->status_filename = strdupz(filename);
+ }
+
+ ff = procfile_reopen(ff, p->status_filename, (!ff)?" \t:,-()/":NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(unlikely(!ff)) return false;
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) return false;
+
+ calls_counter++;
+
+ // let ARL use this pid
+ arl_ptr.p = p;
+ arl_ptr.ff = ff;
+
+ size_t lines = procfile_lines(ff), l;
+ arl_begin(p->status_arl);
+
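+    // the ARL dispatches the callbacks registered above as matching keys are found;
+    // arl_check() returns non-zero once all expected keys have been seen, so the loop can stop early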
+ for(l = 0; l < lines ;l++) {
+ // debug_log("CHECK: line %zu of %zu, key '%s' = '%s'", l, lines, procfile_lineword(ff, l, 0), procfile_lineword(ff, l, 1));
+ arl_ptr.line = l;
+ if(unlikely(arl_check(p->status_arl,
+ procfile_lineword(ff, l, 0),
+ procfile_lineword(ff, l, 1)))) break;
+ }
+
+ p->values[PDF_VMSHARED] = p->values[PDF_RSSFILE] + p->values[PDF_RSSSHMEM];
+ return true;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// global CPU utilization
+
+bool apps_os_read_global_cpu_utilization_linux(void) {
+ static char filename[FILENAME_MAX + 1] = "";
+ static procfile *ff = NULL;
+ static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0;
+ static usec_t collected_usec = 0, last_collected_usec = 0;
+
+ if(unlikely(!ff)) {
+ snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix);
+ ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) goto cleanup;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) goto cleanup;
+
+ last_collected_usec = collected_usec;
+ collected_usec = now_monotonic_usec();
+
+ calls_counter++;
+
+    // temporary - nice time is folded into global_utime below
+ kernel_uint_t global_ntime = 0;
+
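+    // the /proc/stat 'cpu' fields are cumulative ticks since boot; the deltas below
+    // are converted to nanosecond-cores via CPU_TO_NANOSECONDCORES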
+ incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES);
+ incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES);
+ incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES);
+ incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES);
+
+ global_utime += global_ntime;
+
+ if(enable_guest_charts) {
+        // temporary - guest nice time is folded into global_gtime below
+ kernel_uint_t global_gntime = 0;
+
+ // guest nice time, on guest time
+ incremental_rate(global_gntime, gntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 11)), collected_usec, last_collected_usec, 1);
+
+ global_gtime += global_gntime;
+
+ // remove guest time from user time
+ global_utime -= (global_utime > global_gtime) ? global_gtime : global_utime;
+ }
+
+ if(unlikely(global_iterations_counter == 1)) {
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
+ }
+
+ return true;
+
+cleanup:
+ global_utime = 0;
+ global_stime = 0;
+ global_gtime = 0;
+ return false;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// /proc/pid/stat
+
+static inline void update_proc_state_count(char proc_stt) {
+ switch (proc_stt) {
+ case 'S':
+ proc_state_count[PROC_STATUS_SLEEPING] += 1;
+ break;
+ case 'R':
+ proc_state_count[PROC_STATUS_RUNNING] += 1;
+ break;
+ case 'D':
+ proc_state_count[PROC_STATUS_SLEEPING_D] += 1;
+ break;
+ case 'Z':
+ proc_state_count[PROC_STATUS_ZOMBIE] += 1;
+ break;
+ case 'T':
+ proc_state_count[PROC_STATUS_STOPPED] += 1;
+ break;
+ default:
+ break;
+ }
+}
+
+bool apps_os_read_pid_stat_linux(struct pid_stat *p, void *ptr __maybe_unused) {
+ static procfile *ff = NULL;
+
+ if(unlikely(!p->stat_filename)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid);
+ p->stat_filename = strdupz(filename);
+ }
+
+ bool set_quotes = (!ff) ? true : false;
+
+ ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(unlikely(!ff)) goto cleanup;
+
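+    // the comm field in /proc/pid/stat is enclosed in parentheses and may contain spaces,
+    // so '(' and ')' are treated as open/close markers while parsing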
+ // if(set_quotes) procfile_set_quotes(ff, "()");
+ if(unlikely(set_quotes))
+ procfile_set_open_close(ff, "(", ")");
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff)) goto cleanup;
+
+ // p->pid = str2pid_t(procfile_lineword(ff, 0, 0));
+ char *comm = procfile_lineword(ff, 0, 1);
+ p->state = *(procfile_lineword(ff, 0, 2));
+ p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
+ // p->pgrp = (int32_t)str2pid_t(procfile_lineword(ff, 0, 4));
+ // p->session = (int32_t)str2pid_t(procfile_lineword(ff, 0, 5));
+ // p->tty_nr = (int32_t)str2pid_t(procfile_lineword(ff, 0, 6));
+ // p->tpgid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 7));
+ // p->flags = str2uint64_t(procfile_lineword(ff, 0, 8));
+
+ update_pid_comm(p, comm);
+
+ pid_incremental_rate(stat, PDF_MINFLT, str2kernel_uint_t(procfile_lineword(ff, 0, 9)));
+ pid_incremental_rate(stat, PDF_CMINFLT, str2kernel_uint_t(procfile_lineword(ff, 0, 10)));
+ pid_incremental_rate(stat, PDF_MAJFLT, str2kernel_uint_t(procfile_lineword(ff, 0, 11)));
+ pid_incremental_rate(stat, PDF_CMAJFLT, str2kernel_uint_t(procfile_lineword(ff, 0, 12)));
+ pid_incremental_cpu(stat, PDF_UTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 13)));
+ pid_incremental_cpu(stat, PDF_STIME, str2kernel_uint_t(procfile_lineword(ff, 0, 14)));
+ pid_incremental_cpu(stat, PDF_CUTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 15)));
+ pid_incremental_cpu(stat, PDF_CSTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 16)));
+ // p->priority = str2kernel_uint_t(procfile_lineword(ff, 0, 17));
+ // p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18));
+ p->values[PDF_THREADS] = (int32_t) str2uint32_t(procfile_lineword(ff, 0, 19), NULL);
+ // p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20));
+ kernel_uint_t collected_starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21)) / system_hz;
+ p->values[PDF_UPTIME] = (system_uptime_secs > collected_starttime)?(system_uptime_secs - collected_starttime):0;
+ // p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22));
+ // p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23));
+ // p->rsslim = str2kernel_uint_t(procfile_lineword(ff, 0, 24));
+ // p->starcode = str2kernel_uint_t(procfile_lineword(ff, 0, 25));
+ // p->endcode = str2kernel_uint_t(procfile_lineword(ff, 0, 26));
+ // p->startstack = str2kernel_uint_t(procfile_lineword(ff, 0, 27));
+ // p->kstkesp = str2kernel_uint_t(procfile_lineword(ff, 0, 28));
+ // p->kstkeip = str2kernel_uint_t(procfile_lineword(ff, 0, 29));
+ // p->signal = str2kernel_uint_t(procfile_lineword(ff, 0, 30));
+ // p->blocked = str2kernel_uint_t(procfile_lineword(ff, 0, 31));
+ // p->sigignore = str2kernel_uint_t(procfile_lineword(ff, 0, 32));
+ // p->sigcatch = str2kernel_uint_t(procfile_lineword(ff, 0, 33));
+ // p->wchan = str2kernel_uint_t(procfile_lineword(ff, 0, 34));
+ // p->nswap = str2kernel_uint_t(procfile_lineword(ff, 0, 35));
+ // p->cnswap = str2kernel_uint_t(procfile_lineword(ff, 0, 36));
+ // p->exit_signal = str2kernel_uint_t(procfile_lineword(ff, 0, 37));
+ // p->processor = str2kernel_uint_t(procfile_lineword(ff, 0, 38));
+ // p->rt_priority = str2kernel_uint_t(procfile_lineword(ff, 0, 39));
+ // p->policy = str2kernel_uint_t(procfile_lineword(ff, 0, 40));
+ // p->delayacct_blkio_ticks = str2kernel_uint_t(procfile_lineword(ff, 0, 41));
+
+ if(enable_guest_charts) {
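+        // the kernel accounts guest time inside utime/cutime as well,
+        // so it is subtracted below to avoid double counting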
+ pid_incremental_cpu(stat, PDF_GTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 42)));
+ pid_incremental_cpu(stat, PDF_CGTIME, str2kernel_uint_t(procfile_lineword(ff, 0, 43)));
+
+ if (show_guest_time || p->values[PDF_GTIME] || p->values[PDF_CGTIME]) {
+ p->values[PDF_UTIME] -= (p->values[PDF_UTIME] >= p->values[PDF_GTIME]) ? p->values[PDF_GTIME] : p->values[PDF_UTIME];
+ p->values[PDF_CUTIME] -= (p->values[PDF_CUTIME] >= p->values[PDF_CGTIME]) ? p->values[PDF_CGTIME] : p->values[PDF_CUTIME];
+ show_guest_time = true;
+ }
+ }
+
+ if(unlikely(debug_enabled))
+ debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=" KERNEL_UINT_FORMAT,
+ netdata_configured_host_prefix, p->pid, pid_stat_comm(p), (p->target)?string2str(p->target->name):"UNSET", p->stat_collected_usec - p->last_stat_collected_usec,
+ p->values[PDF_UTIME],
+ p->values[PDF_STIME],
+ p->values[PDF_CUTIME],
+ p->values[PDF_CSTIME],
+ p->values[PDF_MINFLT],
+ p->values[PDF_MAJFLT],
+ p->values[PDF_CMINFLT],
+ p->values[PDF_CMAJFLT],
+ p->values[PDF_THREADS]);
+
+ update_proc_state_count(p->state);
+ return true;
+
+cleanup:
+ return false;
+}
+
+// ----------------------------------------------------------------------------
+
+// 1. read all files in /proc
+// 2. for each numeric directory:
+// i. read /proc/pid/stat
+// ii. read /proc/pid/status
+// iii. read /proc/pid/io (requires root access)
+//    iv. read the entries in directory /proc/pid/fd (requires root access)
+// for each entry:
+// a. find or create a struct file_descriptor
+// b. cleanup any old/unused file_descriptors
+
+// after all these, some pids may be linked to targets, while others may not
+
+// in case of errors, only 1 in every 1000 errors is printed
+// to avoid filling up the disk
+// if debug is enabled, all errors are printed
+
+bool apps_os_collect_all_pids_linux(void) {
+#if (PROCESSES_HAVE_STATE == 1)
+ // clear process state counter
+ memset(proc_state_count, 0, sizeof proc_state_count);
+#endif
+
+ // preload the parents and then their children
+ collect_parents_before_children();
+
+ static char uptime_filename[FILENAME_MAX + 1] = "";
+ if(*uptime_filename == '\0')
+ snprintfz(uptime_filename, FILENAME_MAX, "%s/proc/uptime", netdata_configured_host_prefix);
+
+ system_uptime_secs = (kernel_uint_t)(uptime_msec(uptime_filename) / MSEC_PER_SEC);
+
+ char dirname[FILENAME_MAX + 1];
+
+ snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
+ DIR *dir = opendir(dirname);
+ if(!dir) return false;
+
+ struct dirent *de = NULL;
+
+ while((de = readdir(dir))) {
+ char *endptr = de->d_name;
+
+ if(unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9'))
+ continue;
+
+ pid_t pid = (pid_t) strtoul(de->d_name, &endptr, 10);
+
+ // make sure we read a valid number
+ if(unlikely(endptr == de->d_name || *endptr != '\0'))
+ continue;
+
+ incrementally_collect_data_for_pid(pid, NULL);
+ }
+ closedir(dir);
+
+ return true;
+}
+#endif
diff --git a/src/collectors/apps.plugin/apps_os_macos.c b/src/collectors/apps.plugin/apps_os_macos.c
new file mode 100644
index 000000000..27fb0ca7f
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_os_macos.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+#if defined(OS_MACOS)
+
+usec_t system_current_time_ut;
+mach_timebase_info_data_t mach_info;
+
+void apps_os_init_macos(void) {
+ mach_timebase_info(&mach_info);
+}
+
+uint64_t apps_os_get_total_memory_macos(void) {
+ uint64_t ret = 0;
+ int mib[2] = {CTL_HW, HW_MEMSIZE};
+ size_t size = sizeof(ret);
+ if (sysctl(mib, 2, &ret, &size, NULL, 0) == -1) {
+ netdata_log_error("Failed to get total memory using sysctl");
+ return 0;
+ }
+
+ return ret;
+}
+
+bool apps_os_read_pid_fds_macos(struct pid_stat *p, void *ptr __maybe_unused) {
+ static struct proc_fdinfo *fds = NULL;
+ static int fdsCapacity = 0;
+
+ int bufferSize = proc_pidinfo(p->pid, PROC_PIDLISTFDS, 0, NULL, 0);
+ if (bufferSize <= 0) {
+ netdata_log_error("Failed to get the size of file descriptors for PID %d", p->pid);
+ return false;
+ }
+
+ // Resize buffer if necessary
+ if (bufferSize > fdsCapacity) {
+ if(fds)
+ freez(fds);
+
+ fds = mallocz(bufferSize);
+ fdsCapacity = bufferSize;
+ }
+
+ int num_fds = proc_pidinfo(p->pid, PROC_PIDLISTFDS, 0, fds, bufferSize) / PROC_PIDLISTFD_SIZE;
+ if (num_fds <= 0) {
+ netdata_log_error("Failed to get the file descriptors for PID %d", p->pid);
+ return false;
+ }
+
+ for (int i = 0; i < num_fds; i++) {
+ switch (fds[i].proc_fdtype) {
+ case PROX_FDTYPE_VNODE: {
+ struct vnode_fdinfowithpath vi;
+ if (proc_pidfdinfo(p->pid, fds[i].proc_fd, PROC_PIDFDVNODEPATHINFO, &vi, sizeof(vi)) > 0)
+ p->openfds.files++;
+ else
+ p->openfds.other++;
+
+ break;
+ }
+ case PROX_FDTYPE_SOCKET: {
+ p->openfds.sockets++;
+ break;
+ }
+ case PROX_FDTYPE_PIPE: {
+ p->openfds.pipes++;
+ break;
+ }
+
+ default:
+ p->openfds.other++;
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool apps_os_get_pid_cmdline_macos(struct pid_stat *p, char *cmdline, size_t maxBytes) {
+ int mib[3] = {CTL_KERN, KERN_PROCARGS2, p->pid};
+ static char *args = NULL;
+ static size_t size = 0;
+
+ size_t new_size;
+ if (sysctl(mib, 3, NULL, &new_size, NULL, 0) == -1) {
+ return false;
+ }
+
+ if (new_size > size) {
+ if (args)
+ freez(args);
+
+ args = (char *)mallocz(new_size);
+ size = new_size;
+ }
+
+ memset(cmdline, 0, new_size < maxBytes ? new_size : maxBytes);
+
+ size_t used_size = size;
+ if (sysctl(mib, 3, args, &used_size, NULL, 0) == -1)
+ return false;
+
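+    // KERN_PROCARGS2 layout: an int argc, the executable path (NUL padded),
+    // then the NUL-separated argv strings, followed by the environment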
+ int argc;
+ memcpy(&argc, args, sizeof(argc));
+ char *ptr = args + sizeof(argc);
+ used_size -= sizeof(argc);
+
+ // Skip the executable path
+ while (*ptr && used_size > 0) {
+ ptr++;
+ used_size--;
+ }
+
+ // Copy only the arguments to the cmdline buffer, skipping the environment variables
+ size_t i = 0, copied_args = 0;
+ bool inArg = false;
+ for (; used_size > 0 && i < maxBytes - 1 && copied_args < argc; --used_size, ++ptr) {
+ if (*ptr == '\0') {
+ if (inArg) {
+ cmdline[i++] = ' '; // Replace nulls between arguments with spaces
+ inArg = false;
+ copied_args++;
+ }
+ } else {
+ cmdline[i++] = *ptr;
+ inArg = true;
+ }
+ }
+
+ if (i > 0 && cmdline[i - 1] == ' ')
+ i--; // Remove the trailing space if present
+
+ cmdline[i] = '\0'; // Null-terminate the string
+
+ return true;
+}
+
+bool apps_os_read_pid_io_macos(struct pid_stat *p, void *ptr) {
+ struct pid_info *pi = ptr;
+
+    // On macOS, proc_pid_rusage() provides disk I/O statistics (bytes read and written),
+    // but not at the same level of detail as Linux, e.g. it does not separate logical from physical I/O bytes.
+ pid_incremental_rate(io, PDF_LREAD, pi->rusageinfo.ri_diskio_bytesread);
+ pid_incremental_rate(io, PDF_LWRITE, pi->rusageinfo.ri_diskio_byteswritten);
+
+ return true;
+}
+
+bool apps_os_read_pid_limits_macos(struct pid_stat *p __maybe_unused, void *ptr __maybe_unused) {
+ return false;
+}
+
+bool apps_os_read_pid_status_macos(struct pid_stat *p, void *ptr) {
+ struct pid_info *pi = ptr;
+
+ p->uid = pi->bsdinfo.pbi_uid;
+ p->gid = pi->bsdinfo.pbi_gid;
+ p->values[PDF_VMSIZE] = pi->taskinfo.pti_virtual_size;
+ p->values[PDF_VMRSS] = pi->taskinfo.pti_resident_size;
+ // p->values[PDF_VMSWAP] = rusageinfo.ri_swapins + rusageinfo.ri_swapouts; // This is not directly available, consider an alternative representation
+ p->values[PDF_VOLCTX] = pi->taskinfo.pti_csw;
+ // p->values[PDF_NVOLCTX] = taskinfo.pti_nivcsw;
+
+ return true;
+}
+
+static inline void get_current_time(void) {
+ struct timeval current_time;
+ gettimeofday(&current_time, NULL);
+ system_current_time_ut = timeval_usec(&current_time);
+}
+
+// bool apps_os_read_global_cpu_utilization_macos(void) {
+// static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0;
+// static usec_t collected_usec = 0, last_collected_usec = 0;
+//
+// host_cpu_load_info_data_t cpuinfo;
+// mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
+//
+// if (host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO, (host_info_t)&cpuinfo, &count) != KERN_SUCCESS) {
+// // Handle error
+// goto cleanup;
+// }
+//
+// last_collected_usec = collected_usec;
+// collected_usec = now_monotonic_usec();
+//
+// calls_counter++;
+//
+// // Convert ticks to time
+// // Note: MacOS does not separate nice time from user time in the CPU stats, so you might need to adjust this logic
+// kernel_uint_t global_ntime = 0; // Assuming you want to keep track of nice time separately
+//
+// incremental_rate(global_utime, utime_raw, cpuinfo.cpu_ticks[CPU_STATE_USER] + cpuinfo.cpu_ticks[CPU_STATE_NICE], collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES);
+// incremental_rate(global_ntime, ntime_raw, cpuinfo.cpu_ticks[CPU_STATE_NICE], collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES);
+// incremental_rate(global_stime, stime_raw, cpuinfo.cpu_ticks[CPU_STATE_SYSTEM], collected_usec, last_collected_usec, CPU_TO_NANOSECONDCORES);
+//
+// global_utime += global_ntime;
+//
+// if(unlikely(global_iterations_counter == 1)) {
+// global_utime = 0;
+// global_stime = 0;
+// global_gtime = 0;
+// }
+//
+// return 1;
+//
+// cleanup:
+// global_utime = 0;
+// global_stime = 0;
+// global_gtime = 0;
+// return 0;
+// }
+
+bool apps_os_read_pid_stat_macos(struct pid_stat *p, void *ptr) {
+ struct pid_info *pi = ptr;
+
+ p->ppid = pi->proc.kp_eproc.e_ppid;
+
+ // Update command name and target if changed
+ char comm[PROC_PIDPATHINFO_MAXSIZE];
+ int ret = proc_name(p->pid, comm, sizeof(comm));
+ if (ret <= 0)
+ strncpyz(comm, "unknown", sizeof(comm) - 1);
+
+ update_pid_comm(p, comm);
+
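+    // pti_total_user/pti_total_system are in Mach absolute time units;
+    // scale by the timebase (numer/denom) to convert them to nanoseconds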
+ kernel_uint_t userCPU = (pi->taskinfo.pti_total_user * mach_info.numer) / mach_info.denom;
+ kernel_uint_t systemCPU = (pi->taskinfo.pti_total_system * mach_info.numer) / mach_info.denom;
+
+ // Map the values from taskinfo to the pid_stat structure
+ pid_incremental_rate(stat, PDF_MINFLT, pi->taskinfo.pti_faults);
+ pid_incremental_rate(stat, PDF_MAJFLT, pi->taskinfo.pti_pageins);
+ pid_incremental_cpu(stat, PDF_UTIME, userCPU);
+ pid_incremental_cpu(stat, PDF_STIME, systemCPU);
+ p->values[PDF_THREADS] = pi->taskinfo.pti_threadnum;
+
+ usec_t started_ut = timeval_usec(&pi->proc.kp_proc.p_starttime);
+ p->values[PDF_UPTIME] = (system_current_time_ut > started_ut) ? (system_current_time_ut - started_ut) / USEC_PER_SEC : 0;
+
+ // Note: Some values such as guest time, cutime, cstime, etc., are not directly available in MacOS.
+ // You might need to approximate or leave them unset depending on your needs.
+
+ if(unlikely(debug_enabled)) {
+ debug_log_int("READ PROC/PID/STAT for MacOS: process: '%s' on target '%s' VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", threads=%d",
+ pid_stat_comm(p), (p->target) ? string2str(p->target->name) : "UNSET",
+ p->values[PDF_UTIME],
+ p->values[PDF_STIME],
+ p->values[PDF_MINFLT],
+ p->values[PDF_MAJFLT],
+ p->values[PDF_THREADS]);
+ }
+
+ // MacOS doesn't have a direct concept of process state like Linux,
+ // so updating process state count might need a different approach.
+
+ return true;
+}
+
+bool apps_os_collect_all_pids_macos(void) {
+ // Mark all processes as unread before collecting new data
+ struct pid_stat *p;
+ static pid_t *pids = NULL;
+ static int allocatedProcessCount = 0;
+
+ // Get the number of processes
+ int numberOfProcesses = proc_listpids(PROC_ALL_PIDS, 0, NULL, 0);
+ if (numberOfProcesses <= 0) {
+ netdata_log_error("Failed to retrieve the process count");
+ return false;
+ }
+
+ // Allocate or reallocate space to hold all the process IDs if necessary
+ if (numberOfProcesses > allocatedProcessCount) {
+ // Allocate additional space to avoid frequent reallocations
+ allocatedProcessCount = numberOfProcesses + 100;
+ pids = reallocz(pids, allocatedProcessCount * sizeof(pid_t));
+ }
+
+    // this is required; without zeroing, unused entries in the buffer contain garbage PIDs
+ memset(pids, 0, allocatedProcessCount * sizeof(pid_t));
+
+ // get the list of PIDs
+ numberOfProcesses = proc_listpids(PROC_ALL_PIDS, 0, pids, allocatedProcessCount * sizeof(pid_t));
+ if (numberOfProcesses <= 0) {
+ netdata_log_error("Failed to retrieve the process IDs");
+ return false;
+ }
+
+ get_current_time();
+
+ // Collect data for each process
+ for (int i = 0; i < numberOfProcesses; ++i) {
+ pid_t pid = pids[i];
+ if (pid <= 0) continue;
+
+ struct pid_info pi = { 0 };
+
+ int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
+
+ size_t procSize = sizeof(pi.proc);
+ if(sysctl(mib, 4, &pi.proc, &procSize, NULL, 0) == -1) {
+ netdata_log_error("Failed to get proc for PID %d", pid);
+ continue;
+ }
+ if(procSize == 0) // no such process
+ continue;
+
+ int st = proc_pidinfo(pid, PROC_PIDTASKINFO, 0, &pi.taskinfo, sizeof(pi.taskinfo));
+ if (st <= 0) {
+ netdata_log_error("Failed to get task info for PID %d", pid);
+ continue;
+ }
+
+ st = proc_pidinfo(pid, PROC_PIDTBSDINFO, 0, &pi.bsdinfo, sizeof(pi.bsdinfo));
+ if (st <= 0) {
+ netdata_log_error("Failed to get BSD info for PID %d", pid);
+ continue;
+ }
+
+ st = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t *)&pi.rusageinfo);
+ if (st < 0) {
+ netdata_log_error("Failed to get resource usage info for PID %d", pid);
+ continue;
+ }
+
+ incrementally_collect_data_for_pid(pid, &pi);
+ }
+
+ return true;
+}
+
+#endif
diff --git a/src/collectors/apps.plugin/apps_os_windows.c b/src/collectors/apps.plugin/apps_os_windows.c
new file mode 100644
index 000000000..6c2cabc50
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_os_windows.c
@@ -0,0 +1,1011 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+/*
+{
+ "SystemName": "WIN11",
+ "NumObjectTypes": 1,
+ "LittleEndian": 1,
+ "Version": 1,
+ "Revision": 1,
+ "DefaultObject": 238,
+ "PerfFreq": 10000000,
+ "PerfTime": 9242655165203,
+ "PerfTime100nSec": 133716612800215149,
+ "SystemTime": {
+ "Year": 2024,
+ "Month": 9,
+ "DayOfWeek": 2,
+ "Day": 24,
+ "Hour": 14,
+ "Minute": 21,
+ "Second": 20,
+ "Milliseconds": 21
+ },
+ "Objects": [
+ {
+ "NameId": 230,
+ "Name": "Process",
+ "HelpId": 231,
+ "Help": "The Process performance object consists of counters that monitor running application program and system processes. All the threads in a process share the same address space and have access to the same data.",
+ "NumInstances": 274,
+ "NumCounters": 28,
+ "PerfTime": 133716612800215149,
+ "PerfFreq": 10000000,
+ "CodePage": 0,
+ "DefaultCounter": 0,
+ "DetailLevel": "Novice (100)",
+ "Instances": [
+ {
+ "Instance": "Idle",
+ "UniqueID": -1,
+ "Labels": [
+ {
+ "key": "Process",
+ "value": "Idle"
+ }
+ ],
+ "Counters": [
+ {
+ "Counter": "% Processor Time",
+ "Value": {
+ "data": 106881107812500,
+ "time": 133716612800215149,
+ "type": 542180608,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "% Processor Time is the percentage of elapsed time that all of process threads used the processor to execution instructions. An instruction is the basic unit of execution in a computer, a thread is the object that executes instructions, and a process is the object created when a program is run. Code executed to handle some hardware interrupts and trap conditions are included in this count.",
+ "Type": "PERF_100NSEC_TIMER",
+ "Algorithm": "100 * (data1 - data0) / (time1 - time0)",
+ "Description": "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\""
+ },
+ {
+ "Counter": "% User Time",
+ "Value": {
+ "data": 0,
+ "time": 133716612800215149,
+ "type": 542180608,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "% User Time is the percentage of elapsed time that the process threads spent executing code in user mode. Applications, environment subsystems, and integral subsystems execute in user mode. Code executing in user mode cannot damage the integrity of the Windows executive, kernel, and device drivers. Unlike some early operating systems, Windows uses process boundaries for subsystem protection in addition to the traditional protection of user and privileged modes. Some work done by Windows on behalf of the application might appear in other subsystem processes in addition to the privileged time in the process.",
+ "Type": "PERF_100NSEC_TIMER",
+ "Algorithm": "100 * (data1 - data0) / (time1 - time0)",
+ "Description": "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\""
+ },
+ {
+ "Counter": "% Privileged Time",
+ "Value": {
+ "data": 106881107812500,
+ "time": 133716612800215149,
+ "type": 542180608,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "% Privileged Time is the percentage of elapsed time that the process threads spent executing code in privileged mode. When a Windows system service is called, the service will often run in privileged mode to gain access to system-private data. Such data is protected from access by threads executing in user mode. Calls to the system can be explicit or implicit, such as page faults or interrupts. Unlike some early operating systems, Windows uses process boundaries for subsystem protection in addition to the traditional protection of user and privileged modes. Some work done by Windows on behalf of the application might appear in other subsystem processes in addition to the privileged time in the process.",
+ "Type": "PERF_100NSEC_TIMER",
+ "Algorithm": "100 * (data1 - data0) / (time1 - time0)",
+ "Description": "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\""
+ },
+ {
+ "Counter": "Virtual Bytes Peak",
+ "Value": {
+ "data": 8192,
+ "time": 0,
+ "type": 65792,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Virtual Bytes Peak is the maximum size, in bytes, of virtual address space the process has used at any one time. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. However, virtual space is finite, and the process might limit its ability to load libraries.",
+ "Type": "PERF_COUNTER_LARGE_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Virtual Bytes",
+ "Value": {
+ "data": 8192,
+ "time": 0,
+ "type": 65792,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Virtual Bytes is the current size, in bytes, of the virtual address space the process is using. Use of virtual address space does not necessarily imply corresponding use of either disk or main memory pages. Virtual space is finite, and the process can limit its ability to load libraries.",
+ "Type": "PERF_COUNTER_LARGE_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Page Faults/sec",
+ "Value": {
+ "data": 9,
+ "time": 9242655165203,
+ "type": 272696320,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "Page Faults/sec is the rate at which page faults by the threads executing in this process are occurring. A page fault occurs when a thread refers to a virtual memory page that is not in its working set in main memory. This may not cause the page to be fetched from disk if it is on the standby list and hence already in main memory, or if it is in use by another process with whom the page is shared.",
+ "Type": "PERF_COUNTER_COUNTER",
+ "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)",
+ "Description": "32-bit Counter. Divide delta by delta time. Display suffix: \"/sec\""
+ },
+ {
+ "Counter": "Working Set Peak",
+ "Value": {
+ "data": 8192,
+ "time": 0,
+ "type": 65792,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Working Set Peak is the maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the Working Set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from Working Sets. If they are needed they will then be soft-faulted back into the Working Set before they leave main memory.",
+ "Type": "PERF_COUNTER_LARGE_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Working Set",
+ "Value": {
+ "data": 8192,
+ "time": 0,
+ "type": 65792,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Working Set is the current size, in bytes, of the Working Set of this process. The Working Set is the set of memory pages touched recently by the threads in the process. If free memory in the computer is above a threshold, pages are left in the Working Set of a process even if they are not in use. When free memory falls below a threshold, pages are trimmed from Working Sets. If they are needed they will then be soft-faulted back into the Working Set before leaving main memory.",
+ "Type": "PERF_COUNTER_LARGE_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Page File Bytes Peak",
+ "Value": {
+ "data": 61440,
+ "time": 0,
+ "type": 65792,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Page File Bytes Peak is the maximum amount of virtual memory, in bytes, that this process has reserved for use in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and the lack of space in paging files can prevent other processes from allocating memory. If there is no paging file, this counter reflects the maximum amount of virtual memory that the process has reserved for use in physical memory.",
+ "Type": "PERF_COUNTER_LARGE_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Page File Bytes",
+ "Value": {
+ "data": 61440,
+ "time": 0,
+ "type": 65792,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Page File Bytes is the current amount of virtual memory, in bytes, that this process has reserved for use in the paging file(s). Paging files are used to store pages of memory used by the process that are not contained in other files. Paging files are shared by all processes, and the lack of space in paging files can prevent other processes from allocating memory. If there is no paging file, this counter reflects the current amount of virtual memory that the process has reserved for use in physical memory.",
+ "Type": "PERF_COUNTER_LARGE_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Private Bytes",
+ "Value": {
+ "data": 61440,
+ "time": 0,
+ "type": 65792,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Private Bytes is the current size, in bytes, of memory that this process has allocated that cannot be shared with other processes.",
+ "Type": "PERF_COUNTER_LARGE_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Thread Count",
+ "Value": {
+ "data": 24,
+ "time": 0,
+ "type": 65536,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "The number of threads currently active in this process. An instruction is the basic unit of execution in a processor, and a thread is the object that executes instructions. Every running process has at least one thread.",
+ "Type": "PERF_COUNTER_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Priority Base",
+ "Value": {
+ "data": 0,
+ "time": 0,
+ "type": 65536,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "The current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process' base priority.",
+ "Type": "PERF_COUNTER_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Elapsed Time",
+ "Value": {
+ "data": 133707369666486855,
+ "time": 133716612800215149,
+ "type": 807666944,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "The total elapsed time, in seconds, that this process has been running.",
+ "Type": "PERF_ELAPSED_TIME",
+ "Algorithm": "(time0 - data0) / frequency0",
+ "Description": "The data collected in this counter is actually the start time of the item being measured. For display, this data is subtracted from the sample time to yield the elapsed time as the difference between the two. In the definition below, the PerfTime field of the Object contains the sample time as indicated by the PERF_OBJECT_TIMER bit and the difference is scaled by the PerfFreq of the Object to convert the time units into seconds."
+ },
+ {
+ "Counter": "ID Process",
+ "Value": {
+ "data": 0,
+ "time": 0,
+ "type": 65536,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "ID Process is the unique identifier of this process. ID Process numbers are reused, so they only identify a process for the lifetime of that process.",
+ "Type": "PERF_COUNTER_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Creating Process ID",
+ "Value": {
+ "data": 0,
+ "time": 0,
+ "type": 65536,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "The Creating Process ID value is the Process ID of the process that created the process. The creating process may have terminated, so this value may no longer identify a running process.",
+ "Type": "PERF_COUNTER_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Pool Paged Bytes",
+ "Value": {
+ "data": 0,
+ "time": 0,
+ "type": 65536,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Pool Paged Bytes is the size, in bytes, of the paged pool, an area of the system virtual memory that is used for objects that can be written to disk when they are not being used. Memory\\\\Pool Paged Bytes is calculated differently than Process\\\\Pool Paged Bytes, so it might not equal Process(_Total)\\\\Pool Paged Bytes. This counter displays the last observed value only; it is not an average.",
+ "Type": "PERF_COUNTER_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Pool Nonpaged Bytes",
+ "Value": {
+ "data": 272,
+ "time": 0,
+ "type": 65536,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Pool Nonpaged Bytes is the size, in bytes, of the nonpaged pool, an area of the system virtual memory that is used for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated. Memory\\\\Pool Nonpaged Bytes is calculated differently than Process\\\\Pool Nonpaged Bytes, so it might not equal Process(_Total)\\\\Pool Nonpaged Bytes. This counter displays the last observed value only; it is not an average.",
+ "Type": "PERF_COUNTER_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "Handle Count",
+ "Value": {
+ "data": 0,
+ "time": 0,
+ "type": 65536,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "The total number of handles currently open by this process. This number is equal to the sum of the handles currently open by each thread in this process.",
+ "Type": "PERF_COUNTER_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ },
+ {
+ "Counter": "IO Read Operations/sec",
+ "Value": {
+ "data": 0,
+ "time": 9242655165203,
+ "type": 272696576,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "The rate at which the process is issuing read I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.",
+ "Type": "PERF_COUNTER_BULK_COUNT",
+ "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)",
+ "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""
+ },
+ {
+ "Counter": "IO Write Operations/sec",
+ "Value": {
+ "data": 0,
+ "time": 9242655165203,
+ "type": 272696576,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "The rate at which the process is issuing write I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.",
+ "Type": "PERF_COUNTER_BULK_COUNT",
+ "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)",
+ "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""
+ },
+ {
+ "Counter": "IO Data Operations/sec",
+ "Value": {
+ "data": 0,
+ "time": 9242655165203,
+ "type": 272696576,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "The rate at which the process is issuing read and write I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.",
+ "Type": "PERF_COUNTER_BULK_COUNT",
+ "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)",
+ "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""
+ },
+ {
+ "Counter": "IO Other Operations/sec",
+ "Value": {
+ "data": 0,
+ "time": 9242655165203,
+ "type": 272696576,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "The rate at which the process is issuing I/O operations that are neither read nor write operations (for example, a control function). This counter counts all I/O activity generated by the process to include file, network and device I/Os.",
+ "Type": "PERF_COUNTER_BULK_COUNT",
+ "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)",
+ "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""
+ },
+ {
+ "Counter": "IO Read Bytes/sec",
+ "Value": {
+ "data": 0,
+ "time": 9242655165203,
+ "type": 272696576,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "The rate at which the process is reading bytes from I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.",
+ "Type": "PERF_COUNTER_BULK_COUNT",
+ "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)",
+ "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""
+ },
+ {
+ "Counter": "IO Write Bytes/sec",
+ "Value": {
+ "data": 0,
+ "time": 9242655165203,
+ "type": 272696576,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "The rate at which the process is writing bytes to I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.",
+ "Type": "PERF_COUNTER_BULK_COUNT",
+ "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)",
+ "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""
+ },
+ {
+ "Counter": "IO Data Bytes/sec",
+ "Value": {
+ "data": 0,
+ "time": 9242655165203,
+ "type": 272696576,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "The rate at which the process is reading and writing bytes in I/O operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.",
+ "Type": "PERF_COUNTER_BULK_COUNT",
+ "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)",
+ "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""
+ },
+ {
+ "Counter": "IO Other Bytes/sec",
+ "Value": {
+ "data": 0,
+ "time": 9242655165203,
+ "type": 272696576,
+ "multi": 0,
+ "frequency": 10000000
+ },
+ "Help": "The rate at which the process is issuing bytes to I/O operations that do not involve data such as control operations. This counter counts all I/O activity generated by the process to include file, network and device I/Os.",
+ "Type": "PERF_COUNTER_BULK_COUNT",
+ "Algorithm": "(data1 - data0) / ((time1 - time0) / frequency)",
+ "Description": "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\""
+ },
+ {
+ "Counter": "Working Set - Private",
+ "Value": {
+ "data": 8192,
+ "time": 0,
+ "type": 65792,
+ "multi": 0,
+ "frequency": 0
+ },
+ "Help": "Working Set - Private displays the size of the working set, in bytes, that is use for this process only and not shared nor sharable by other processes.",
+ "Type": "PERF_COUNTER_LARGE_RAWCOUNT",
+ "Algorithm": "data0",
+ "Description": "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix."
+ }
+ ]
+ },
+ */
+
+
+#if defined(OS_WINDOWS)
+
+#include <tlhelp32.h>
+#include <wchar.h>
+#include <psapi.h>
+#include <tchar.h>
+#include <strsafe.h>
+
+WCHAR* GetProcessCommandLine(HANDLE hProcess);
+
+struct perflib_data {
+ PERF_DATA_BLOCK *pDataBlock;
+ PERF_OBJECT_TYPE *pObjectType;
+ PERF_INSTANCE_DEFINITION *pi;
+ DWORD pid;
+};
+
+void apps_os_init_windows(void) {
+ PerflibNamesRegistryInitialize();
+
+ if(!EnableWindowsPrivilege(SE_DEBUG_NAME))
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_DEBUG_NAME);
+
+ if(!EnableWindowsPrivilege(SE_SYSTEM_PROFILE_NAME))
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_SYSTEM_PROFILE_NAME);
+
+ if(!EnableWindowsPrivilege(SE_PROF_SINGLE_PROCESS_NAME))
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_PROF_SINGLE_PROCESS_NAME);
+}
+
+uint64_t apps_os_get_total_memory_windows(void) {
+ MEMORYSTATUSEX memStat = { 0 };
+ memStat.dwLength = sizeof(memStat);
+
+ if (!GlobalMemoryStatusEx(&memStat)) {
+ netdata_log_error("GlobalMemoryStatusEx() failed.");
+ return 0;
+ }
+
+ return memStat.ullTotalPhys;
+}
+
+// remove the PID suffix and .exe suffix, if any
+static void fix_windows_comm(struct pid_stat *p, char *comm) {
+ char pid[UINT64_MAX_LENGTH + 1]; // +1 for the underscore
+ pid[0] = '_';
+ print_uint64(&pid[1], p->pid);
+ size_t pid_len = strlen(pid);
+ size_t comm_len = strlen(comm);
+ if (pid_len < comm_len) {
+ char *compare = &comm[comm_len - pid_len];
+ if (strcmp(pid, compare) == 0)
+ *compare = '\0';
+ }
+
+ // remove the .exe suffix, if any
+ comm_len = strlen(comm);
+ size_t exe_len = strlen(".exe");
+ if(exe_len < comm_len) {
+ char *compare = &comm[comm_len - exe_len];
+ if (strcmp(".exe", compare) == 0)
+ *compare = '\0';
+ }
+}
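+// Example (illustrative, not from the source): for pid 1234, an instance name such as
+// "chrome.exe_1234" first loses the "_1234" suffix and then the ".exe" suffix,
+// leaving "chrome" as the comm.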
+
+// Convert wide string to UTF-8
+static char *wchar_to_utf8(WCHAR *s) {
+ static __thread char utf8[PATH_MAX];
+ static __thread int utf8_size = sizeof(utf8);
+
+ int len = WideCharToMultiByte(CP_UTF8, 0, s, -1, NULL, 0, NULL, NULL);
+ if (len <= 0 || len >= utf8_size)
+ return NULL;
+
+ WideCharToMultiByte(CP_UTF8, 0, s, -1, utf8, utf8_size, NULL, NULL);
+ return utf8;
+}
+
+static char *ansi_to_utf8(LPCSTR str) {
+ static __thread WCHAR unicode[PATH_MAX];
+
+ // Step 1: Convert ANSI string (LPSTR) to wide string (UTF-16)
+ size_t count = any_to_utf16(CP_ACP, unicode, _countof(unicode), str, -1, NULL);
+ if (!count) return NULL;
+
+ return wchar_to_utf8(unicode);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+// return a sanitized name for the process
+STRING *GetProcessFriendlyNameFromPathSanitized(WCHAR *path) {
+ static __thread uint8_t void_buf[1024 * 1024];
+ static __thread DWORD void_buf_size = sizeof(void_buf);
+ static __thread wchar_t unicode[PATH_MAX];
+ static __thread DWORD unicode_size = sizeof(unicode) / sizeof(*unicode);
+
+ DWORD handle;
+ DWORD size = GetFileVersionInfoSizeW(path, &handle);
+ if (size == 0 || size > void_buf_size)
+ return NULL;
+
+ if (GetFileVersionInfoW(path, handle, size, void_buf)) {
+ LPWSTR value = NULL;
+ UINT len = 0;
+ if (VerQueryValueW(void_buf, L"\\StringFileInfo\\040904B0\\FileDescription", (LPVOID*)&value, &len) &&
+ len > 0 && len < unicode_size) {
+ wcsncpy(unicode, value, unicode_size - 1);
+ unicode[unicode_size - 1] = L'\0';
+ char *name = wchar_to_utf8(unicode);
+ sanitize_apps_plugin_chart_meta(name);
+ return string_strdupz(name);
+ }
+ }
+
+ return NULL;
+}
+
+#define SERVICE_PREFIX "Service "
+// return a sanitized name for the process
+static STRING *GetNameFromCmdlineSanitized(struct pid_stat *p) {
+ if(!p->cmdline) return NULL;
+
+ char buf[string_strlen(p->cmdline) + 1];
+ memcpy(buf, string2str(p->cmdline), sizeof(buf));
+ char *words[100];
+ size_t num_words = quoted_strings_splitter(buf, words, 100, isspace_map_pluginsd);
+
+ if(string_strcmp(p->comm, "svchost") == 0) {
+ // find -s SERVICE in the command line
+ for(size_t i = 0; i < num_words ;i++) {
+ if(strcmp(words[i], "-s") == 0 && i + 1 < num_words) {
+ char service[strlen(words[i + 1]) + sizeof(SERVICE_PREFIX)]; // sizeof() includes a null
+ strcpy(service, SERVICE_PREFIX);
+ strcpy(&service[sizeof(SERVICE_PREFIX) - 1], words[i + 1]);
+ sanitize_apps_plugin_chart_meta(service);
+ return string_strdupz(service);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void GetServiceNames(void) {
+ SC_HANDLE hSCManager = OpenSCManager(NULL, NULL, SC_MANAGER_ENUMERATE_SERVICE);
+ if (hSCManager == NULL) return;
+
+ DWORD dwBytesNeeded = 0, dwServicesReturned = 0, dwResumeHandle = 0;
+ ENUM_SERVICE_STATUS_PROCESS *pServiceStatus = NULL;
+
+ // First, query the required buffer size
+ EnumServicesStatusEx(
+ hSCManager, SC_ENUM_PROCESS_INFO, SERVICE_WIN32, SERVICE_STATE_ALL,
+ NULL, 0, &dwBytesNeeded, &dwServicesReturned, &dwResumeHandle, NULL);
+
+ if (dwBytesNeeded == 0) {
+ CloseServiceHandle(hSCManager);
+ return;
+ }
+
+ // Allocate memory to hold the services
+ pServiceStatus = mallocz(dwBytesNeeded);
+
+ // Now, retrieve the list of services
+ if (!EnumServicesStatusEx(
+ hSCManager, SC_ENUM_PROCESS_INFO, SERVICE_WIN32, SERVICE_STATE_ALL,
+ (LPBYTE)pServiceStatus, dwBytesNeeded, &dwBytesNeeded, &dwServicesReturned,
+ &dwResumeHandle, NULL)) {
+ freez(pServiceStatus);
+ CloseServiceHandle(hSCManager);
+ return;
+ }
+
+ // Loop through the services
+ for (DWORD i = 0; i < dwServicesReturned; i++) {
+ if(!pServiceStatus[i].lpDisplayName || !*pServiceStatus[i].lpDisplayName)
+ continue;
+
+ struct pid_stat *p = find_pid_entry((pid_t)pServiceStatus[i].ServiceStatusProcess.dwProcessId);
+ if(p && !p->got_service) {
+ p->got_service = true;
+
+ char *name = ansi_to_utf8(pServiceStatus[i].lpDisplayName);
+ if(name) {
+ sanitize_apps_plugin_chart_meta(name);
+ string_freez(p->name);
+ p->name = string_strdupz(name);
+ }
+ }
+ }
+
+ freez(pServiceStatus);
+ CloseServiceHandle(hSCManager);
+}
+
+static WCHAR *executable_path_from_cmdline(WCHAR *cmdline) {
+ if (!cmdline || !*cmdline) return NULL;
+
+ WCHAR *exe_path_start = cmdline;
+ WCHAR *exe_path_end = NULL;
+
+ if (cmdline[0] == L'"') {
+ // Command line starts with a double quote
+ exe_path_start++; // Move past the first double quote
+ exe_path_end = wcschr(exe_path_start, L'"'); // Find the next quote
+ }
+ else {
+ // Command line does not start with a double quote
+ exe_path_end = wcschr(exe_path_start, L' '); // Find the first space
+ }
+
+ if (exe_path_end) {
+ // Null-terminate the string at the end of the executable path
+ *exe_path_end = L'\0';
+ return exe_path_start;
+ }
+
+ return NULL;
+}
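+// Examples (illustrative):
+//   "C:\Program Files\app.exe" --flag  -> C:\Program Files\app.exe
+//   C:\Windows\notepad.exe file.txt    -> C:\Windows\notepad.exe
+// Note that the path is extracted in place: the command line buffer is truncated
+// at the end of the executable path.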
+
+static BOOL GetProcessUserSID(HANDLE hProcess, PSID *ppSid) {
+ HANDLE hToken;
+ BOOL result = FALSE;
+ DWORD dwSize = 0;
+ PTOKEN_USER pTokenUser = NULL;
+
+ if (!OpenProcessToken(hProcess, TOKEN_QUERY, &hToken))
+ return FALSE;
+
+ GetTokenInformation(hToken, TokenUser, NULL, 0, &dwSize);
+ if (dwSize == 0) {
+ CloseHandle(hToken);
+ return FALSE;
+ }
+
+ pTokenUser = (PTOKEN_USER)LocalAlloc(LPTR, dwSize);
+ if (pTokenUser == NULL) {
+ CloseHandle(hToken);
+ return FALSE;
+ }
+
+ if (GetTokenInformation(hToken, TokenUser, pTokenUser, dwSize, &dwSize)) {
+ DWORD sidSize = GetLengthSid(pTokenUser->User.Sid);
+ *ppSid = (PSID)LocalAlloc(LPTR, sidSize);
+ if (*ppSid) {
+ if (CopySid(sidSize, *ppSid, pTokenUser->User.Sid)) {
+ result = TRUE;
+ } else {
+ LocalFree(*ppSid);
+ *ppSid = NULL;
+ }
+ }
+ }
+
+ LocalFree(pTokenUser);
+ CloseHandle(hToken);
+ return result;
+}
+
+void GetAllProcessesInfo(void) {
+ static __thread wchar_t unicode[PATH_MAX];
+ static __thread DWORD unicode_size = sizeof(unicode) / sizeof(*unicode);
+
+ calls_counter++;
+
+ HANDLE hSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
+ if (hSnapshot == INVALID_HANDLE_VALUE) return;
+
+ PROCESSENTRY32W pe32;
+ pe32.dwSize = sizeof(PROCESSENTRY32W);
+
+ if (!Process32FirstW(hSnapshot, &pe32)) {
+ CloseHandle(hSnapshot);
+ return;
+ }
+
+ bool need_service_names = false;
+
+ do {
+ if(!pe32.th32ProcessID) continue;
+
+ struct pid_stat *p = get_or_allocate_pid_entry((pid_t)pe32.th32ProcessID);
+ p->ppid = (pid_t)pe32.th32ParentProcessID;
+ if(p->got_info) continue;
+ p->got_info = true;
+
+ HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, p->pid);
+ if (hProcess == NULL)
+ continue;
+
+ // Get the full command line, if possible
+ {
+ WCHAR *cmdline = GetProcessCommandLine(hProcess); // returns malloc'd buffer
+ if (cmdline) {
+ update_pid_cmdline(p, wchar_to_utf8(cmdline));
+
+ // extract the process full path from the command line
+ WCHAR *path = executable_path_from_cmdline(cmdline);
+ if(path) {
+ string_freez(p->name);
+ p->name = GetProcessFriendlyNameFromPathSanitized(path);
+ }
+
+ free(cmdline); // free(), not freez()
+ }
+ }
+
+ if(!p->cmdline || !p->name) {
+ // QueryFullProcessImageNameW() updates the size argument in place,
+ // so pass a fresh copy on every call to keep the full buffer capacity.
+ DWORD path_size = unicode_size;
+ if (QueryFullProcessImageNameW(hProcess, 0, unicode, &path_size)) {
+ // put the full path name to the command into cmdline
+ if(!p->cmdline)
+ update_pid_cmdline(p, wchar_to_utf8(unicode));
+
+ if(!p->name)
+ p->name = GetProcessFriendlyNameFromPathSanitized(unicode);
+ }
+ }
+
+ if(!p->sid_name) {
+ PSID pSid = NULL;
+ if (GetProcessUserSID(hProcess, &pSid))
+ p->sid_name = cached_sid_fullname_or_sid_str(pSid);
+ else
+ p->sid_name = string_strdupz("Unknown");
+ }
+
+ CloseHandle(hProcess);
+
+ char *comm = wchar_to_utf8(pe32.szExeFile);
+ fix_windows_comm(p, comm);
+ update_pid_comm(p, comm); // will sanitize p->comm
+
+ if(!need_service_names && string_strcmp(p->comm, "svchost") == 0)
+ need_service_names = true;
+
+ STRING *better_name = GetNameFromCmdlineSanitized(p);
+ if(better_name) {
+ string_freez(p->name);
+ p->name = better_name;
+ }
+
+ } while (Process32NextW(hSnapshot, &pe32));
+
+ CloseHandle(hSnapshot);
+
+ if(need_service_names)
+ GetServiceNames();
+}
+
+static inline kernel_uint_t perflib_cpu_utilization(COUNTER_DATA *d) {
+ internal_fatal(d->current.CounterType != PERF_100NSEC_TIMER,
+ "Wrong timer type");
+
+ ULONGLONG data1 = d->current.Data;
+ ULONGLONG data0 = d->previous.Data;
+ LONGLONG time1 = d->current.Time;
+ LONGLONG time0 = d->previous.Time;
+
+ /*
+ * The Windows documentation provides the formula for percentage:
+ *
+ * 100 * (data1 - data0) / (time1 - time0)
+ *
+ * To get a fraction (0.0 to 1.0) instead of a percentage, we
+ * simply remove the 100 multiplier:
+ *
+ * (data1 - data0) / (time1 - time0)
+ *
+ * This fraction represents the portion of a single CPU core used
+ * over the time period. Multiplying this fraction by NSEC_PER_SEC
+ * converts it to nanosecond-cores:
+ *
+ * NSEC_PER_SEC * (data1 - data0) / (time1 - time0)
+ */
+
+ LONGLONG dt = time1 - time0;
+ if(dt > 0)
+ return NSEC_PER_SEC * (data1 - data0) / dt;
+ else
+ return 0;
+}
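+// Worked example (illustrative): with PERF_100NSEC_TIMER both Data and Time advance
+// in 100ns units, so a process that consumed 0.25s of CPU over a 1s interval gives:
+//   data1 - data0 = 2,500,000 and time1 - time0 = 10,000,000
+//   NSEC_PER_SEC * 2,500,000 / 10,000,000 = 250,000,000 nanosecond-cores
+// which the chart dimensions later divide by NSEC_PER_SEC / 100 to display 25 (% of one core).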
+
+static inline kernel_uint_t perflib_rate(COUNTER_DATA *d) {
+ ULONGLONG data1 = d->current.Data;
+ ULONGLONG data0 = d->previous.Data;
+ LONGLONG time1 = d->current.Time;
+ LONGLONG time0 = d->previous.Time;
+
+ LONGLONG dt = (time1 - time0);
+ if(dt > 0)
+ return (RATES_DETAIL * (data1 - data0)) / dt;
+ else
+ return 0;
+}
+
+static inline kernel_uint_t perflib_value(COUNTER_DATA *d) {
+ internal_fatal(d->current.CounterType != PERF_COUNTER_LARGE_RAWCOUNT &&
+ d->current.CounterType != PERF_COUNTER_RAWCOUNT,
+ "Wrong gauge type");
+
+ return d->current.Data;
+}
+
+static inline kernel_uint_t perflib_elapsed(COUNTER_DATA *d) {
+ ULONGLONG data1 = d->current.Data;
+ LONGLONG time1 = d->current.Time;
+ LONGLONG freq1 = d->current.Frequency;
+
+ internal_fatal(d->current.CounterType != PERF_ELAPSED_TIME || !freq1,
+ "Wrong gauge type");
+
+ if(!data1 || !time1 || !freq1 || data1 > (ULONGLONG)time1)
+ return 0;
+
+ return (time1 - data1) / freq1;
+}
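+// Worked example (illustrative): with PERF_ELAPSED_TIME, Data holds the process start
+// time and Time the sample time, both in Frequency ticks. Assuming frequency 10,000,000
+// (100ns ticks) and time1 - data1 = 1,200,000,000 ticks, the result is 120 seconds of uptime.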
+
+bool apps_os_collect_all_pids_windows(void) {
+ calls_counter++;
+
+ struct perflib_data d = { 0 };
+ d.pDataBlock = perflibGetPerformanceData(RegistryFindIDByName("Process"));
+ if(!d.pDataBlock) return false;
+
+ d.pObjectType = perflibFindObjectTypeByName(d.pDataBlock, "Process");
+ if(!d.pObjectType) {
+ perflibFreePerformanceData();
+ return false;
+ }
+
+ // we need these outside the loop to avoid searching by name all the time
+ // (our perflib library caches the id inside the COUNTER_DATA).
+ COUNTER_DATA processId = {.key = "ID Process"};
+
+ d.pi = NULL;
+ size_t added = 0;
+ for(LONG i = 0; i < d.pObjectType->NumInstances; i++) {
+ d.pi = perflibForEachInstance(d.pDataBlock, d.pObjectType, d.pi);
+ if (!d.pi) break;
+
+ perflibGetInstanceCounter(d.pDataBlock, d.pObjectType, d.pi, &processId);
+ d.pid = (DWORD) processId.current.Data;
+ if (d.pid <= 0) continue; // 0 = Idle (this takes all the spare resources)
+
+ // Get or create pid_stat structure
+ struct pid_stat *p = get_or_allocate_pid_entry((pid_t) d.pid);
+
+ if (unlikely(!p->initialized)) {
+ // a new pid
+ p->initialized = true;
+
+ static __thread char comm[MAX_PATH];
+
+ if (getInstanceName(d.pDataBlock, d.pObjectType, d.pi, comm, sizeof(comm)))
+ fix_windows_comm(p, comm);
+ else
+ strncpyz(comm, "unknown", sizeof(comm) - 1);
+
+ if(strcmp(comm, "wininit") == 0)
+ INIT_PID = p->pid;
+
+ update_pid_comm(p, comm); // will sanitize p->comm
+ added++;
+
+ COUNTER_DATA ppid = {.key = "Creating Process ID"};
+ perflibGetInstanceCounter(d.pDataBlock, d.pObjectType, d.pi, &ppid);
+ p->ppid = (pid_t) ppid.current.Data;
+
+ p->perflib[PDF_UTIME].key = "% User Time";
+ p->perflib[PDF_STIME].key = "% Privileged Time";
+ p->perflib[PDF_VMSIZE].key = "Virtual Bytes";
+ p->perflib[PDF_VMRSS].key = "Working Set";
+ p->perflib[PDF_VMSWAP].key = "Page File Bytes";
+ p->perflib[PDF_LREAD].key = "IO Read Bytes/sec";
+ p->perflib[PDF_LWRITE].key = "IO Write Bytes/sec";
+ p->perflib[PDF_OREAD].key = "IO Read Operations/sec";
+ p->perflib[PDF_OWRITE].key = "IO Write Operations/sec";
+ p->perflib[PDF_THREADS].key = "Thread Count";
+ p->perflib[PDF_HANDLES].key = "Handle Count";
+ p->perflib[PDF_MINFLT].key = "Page Faults/sec";
+ p->perflib[PDF_UPTIME].key = "Elapsed Time";
+ }
+
+ pid_collection_started(p);
+
+ // get all data from perflib
+ size_t ok = 0, failed = 0, invalid = 0;
+ for (PID_FIELD f = 0; f < PDF_MAX; f++) {
+ if (p->perflib[f].key) {
+ if (!perflibGetInstanceCounter(d.pDataBlock, d.pObjectType, d.pi, &p->perflib[f])) {
+ failed++;
+ nd_log(NDLS_COLLECTORS, NDLP_ERR,
+ "Cannot find field '%s' in processes data", p->perflib[f].key);
+ } else
+ ok++;
+ } else
+ invalid++;
+ }
+
+ if(failed) {
+ pid_collection_failed(p);
+ continue;
+ }
+
+ // CPU time
+ p->values[PDF_UTIME] = perflib_cpu_utilization(&p->perflib[PDF_UTIME]);
+ p->values[PDF_STIME] = perflib_cpu_utilization(&p->perflib[PDF_STIME]);
+
+ // Memory
+ p->values[PDF_VMRSS] = perflib_value(&p->perflib[PDF_VMRSS]);
+ p->values[PDF_VMSIZE] = perflib_value(&p->perflib[PDF_VMSIZE]);
+ p->values[PDF_VMSWAP] = perflib_value(&p->perflib[PDF_VMSWAP]);
+
+ // I/O
+ p->values[PDF_LREAD] = perflib_rate(&p->perflib[PDF_LREAD]);
+ p->values[PDF_LWRITE] = perflib_rate(&p->perflib[PDF_LWRITE]);
+ p->values[PDF_OREAD] = perflib_rate(&p->perflib[PDF_OREAD]);
+ p->values[PDF_OWRITE] = perflib_rate(&p->perflib[PDF_OWRITE]);
+
+ // Threads
+ p->values[PDF_THREADS] = perflib_value(&p->perflib[PDF_THREADS]);
+
+ // Handle count
+ p->values[PDF_HANDLES] = perflib_value(&p->perflib[PDF_HANDLES]);
+
+ // Page faults
+ // Windows doesn't distinguish between minor and major page faults
+ p->values[PDF_MINFLT] = perflib_rate(&p->perflib[PDF_MINFLT]);
+
+ // Process uptime
+ // Convert 100-nanosecond units to seconds
+ p->values[PDF_UPTIME] = perflib_elapsed(&p->perflib[PDF_UPTIME]);
+
+ pid_collection_completed(p);
+
+// if(p->perflib[PDF_UTIME].current.Data != p->perflib[PDF_UTIME].previous.Data &&
+// p->perflib[PDF_UTIME].current.Data && p->perflib[PDF_UTIME].previous.Data &&
+// p->pid == 61812) {
+// const char *cmd = string2str(p->comm);
+// uint64_t cpu_divisor = NSEC_PER_SEC / 100ULL;
+// uint64_t cpus = os_get_system_cpus();
+// double u = (double)p->values[PDF_UTIME] / cpu_divisor;
+// double s = (double)p->values[PDF_STIME] / cpu_divisor;
+// int x = 0;
+// x++;
+// }
+ }
+
+ perflibFreePerformanceData();
+
+ if(added) {
+ GetAllProcessesInfo();
+
+#if (USE_APPS_GROUPS_CONF == 1)
+ for(struct pid_stat *p = root_of_pids(); p ;p = p->next) {
+ if(!p->assigned_to_target)
+ assign_app_group_target_to_pid(p);
+ }
+#endif
+ }
+
+ return true;
+}
+
+#endif
diff --git a/src/collectors/apps.plugin/apps_os_windows_nt.c b/src/collectors/apps.plugin/apps_os_windows_nt.c
new file mode 100644
index 000000000..ff47cbcab
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_os_windows_nt.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// this must not include libnetdata.h because STRING is defined in winternl.h
+
+#include "libnetdata/common.h"
+
+#if defined(OS_WINDOWS)
+#include <winternl.h>
+
+// --------------------------------------------------------------------------------------------------------------------
+// Get the full windows command line
+
+WCHAR* GetProcessCommandLine(HANDLE hProcess) {
+ PROCESS_BASIC_INFORMATION pbi;
+ ULONG len;
+ NTSTATUS status = NtQueryInformationProcess(hProcess, 0, &pbi, sizeof(pbi), &len);
+ if (status != 0)
+ return NULL;
+
+ // Read the PEB and the process parameters from the target process address space
+ PEB peb;
+ if (!ReadProcessMemory(hProcess, pbi.PebBaseAddress, &peb, sizeof(peb), NULL))
+ return NULL;
+
+ RTL_USER_PROCESS_PARAMETERS procParams;
+ if (!ReadProcessMemory(hProcess, peb.ProcessParameters, &procParams, sizeof(procParams), NULL))
+ return NULL;
+
+ WCHAR* commandLine = (WCHAR*)malloc(procParams.CommandLine.MaximumLength);
+ if (!commandLine)
+ return NULL;
+
+ if (!ReadProcessMemory(hProcess, procParams.CommandLine.Buffer, commandLine, procParams.CommandLine.MaximumLength, NULL)) {
+ free(commandLine);
+ return NULL;
+ }
+
+ return commandLine;
+}
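+// Usage sketch (illustrative): the returned buffer is allocated with malloc(), so the
+// caller must release it with free(), not the netdata allocator wrappers, e.g.:
+//   WCHAR *cmdline = GetProcessCommandLine(hProcess);
+//   if (cmdline) { /* ... convert and parse ... */ free(cmdline); }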
+
+#endif
diff --git a/src/collectors/apps.plugin/apps_output.c b/src/collectors/apps.plugin/apps_output.c
index 84928e641..b9ee5252a 100644
--- a/src/collectors/apps.plugin/apps_output.c
+++ b/src/collectors/apps.plugin/apps_output.c
@@ -76,7 +76,7 @@ void send_resource_usage_to_netdata(usec_t dt) {
"SET inode_changes = %zu\n"
"SET link_changes = %zu\n"
"SET pids = %zu\n"
- "SET fds = %d\n"
+ "SET fds = %"PRIu32"\n"
"SET targets = %zu\n"
"SET new_pids = %zu\n"
"END\n"
@@ -89,8 +89,8 @@ void send_resource_usage_to_netdata(usec_t dt) {
, filenames_allocated_counter
, inodes_changed_counter
, links_changed_counter
- , all_pids_count
- , all_files_len
+ , all_pids_count()
+ , all_files_len_get()
, apps_groups_targets_count
, targets_assignment_counter
);
@@ -103,103 +103,124 @@ void send_collected_data_to_netdata(struct target *root, const char *type, usec_
if (unlikely(!w->exposed))
continue;
- send_BEGIN(type, w->clean_name, "processes", dt);
- send_SET("processes", w->processes);
+ send_BEGIN(type, string2str(w->clean_name), "processes", dt);
+ send_SET("processes", w->values[PDF_PROCESSES]);
send_END();
- send_BEGIN(type, w->clean_name, "threads", dt);
- send_SET("threads", w->num_threads);
+ send_BEGIN(type, string2str(w->clean_name), "threads", dt);
+ send_SET("threads", w->values[PDF_THREADS]);
send_END();
- if (unlikely(!w->processes && !w->is_other))
+ if (unlikely(!w->values[PDF_PROCESSES]))
continue;
- send_BEGIN(type, w->clean_name, "cpu_utilization", dt);
- send_SET("user", (kernel_uint_t)(w->utime * utime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cutime * cutime_fix_ratio)) : 0ULL));
- send_SET("system", (kernel_uint_t)(w->stime * stime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cstime * cstime_fix_ratio)) : 0ULL));
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME)
+ send_BEGIN(type, string2str(w->clean_name), "cpu_utilization", dt);
+ send_SET("user", (kernel_uint_t)(w->values[PDF_UTIME] * utime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->values[PDF_CUTIME] * cutime_fix_ratio)) : 0ULL));
+ send_SET("system", (kernel_uint_t)(w->values[PDF_STIME] * stime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->values[PDF_CSTIME] * cstime_fix_ratio)) : 0ULL));
send_END();
+#else
+ send_BEGIN(type, string2str(w->clean_name), "cpu_utilization", dt);
+ send_SET("user", (kernel_uint_t)(w->values[PDF_UTIME] * utime_fix_ratio));
+ send_SET("system", (kernel_uint_t)(w->values[PDF_STIME] * stime_fix_ratio));
+ send_END();
+#endif
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
if (enable_guest_charts) {
- send_BEGIN(type, w->clean_name, "cpu_guest_utilization", dt);
- send_SET("guest", (kernel_uint_t)(w->gtime * gtime_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cgtime * cgtime_fix_ratio)) : 0ULL));
+ send_BEGIN(type, string2str(w->clean_name), "cpu_guest_utilization", dt);
+ send_SET("guest", (kernel_uint_t)(w->values[PDF_GTIME] * gtime_fix_ratio)
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
+ + (include_exited_childs ? ((kernel_uint_t)(w->values[PDF_CGTIME] * cgtime_fix_ratio)) : 0ULL)
+#endif
+ );
send_END();
}
+#endif
- send_BEGIN(type, w->clean_name, "cpu_context_switches", dt);
- send_SET("voluntary", w->status_voluntary_ctxt_switches);
- send_SET("involuntary", w->status_nonvoluntary_ctxt_switches);
+ send_BEGIN(type, string2str(w->clean_name), "mem_private_usage", dt);
+#if (PROCESSES_HAVE_VMSHARED == 1)
+ send_SET("mem", (w->values[PDF_VMRSS] > w->values[PDF_VMSHARED])?(w->values[PDF_VMRSS] - w->values[PDF_VMSHARED]) : 0ULL);
+#else
+ send_SET("mem", w->values[PDF_VMRSS]);
+#endif
send_END();
- send_BEGIN(type, w->clean_name, "mem_private_usage", dt);
- send_SET("mem", (w->status_vmrss > w->status_vmshared)?(w->status_vmrss - w->status_vmshared) : 0ULL);
+#if (PROCESSES_HAVE_VOLCTX == 1) || (PROCESSES_HAVE_NVOLCTX == 1)
+ send_BEGIN(type, string2str(w->clean_name), "cpu_context_switches", dt);
+#if (PROCESSES_HAVE_VOLCTX == 1)
+ send_SET("voluntary", w->values[PDF_VOLCTX]);
+#endif
+#if (PROCESSES_HAVE_NVOLCTX == 1)
+ send_SET("involuntary", w->values[PDF_NVOLCTX]);
+#endif
send_END();
#endif
- send_BEGIN(type, w->clean_name, "mem_usage", dt);
- send_SET("rss", w->status_vmrss);
+ send_BEGIN(type, string2str(w->clean_name), "mem_usage", dt);
+ send_SET("rss", w->values[PDF_VMRSS]);
send_END();
-#if !defined(__APPLE__)
- send_BEGIN(type, w->clean_name, "vmem_usage", dt);
- send_SET("vmem", w->status_vmsize);
+ send_BEGIN(type, string2str(w->clean_name), "vmem_usage", dt);
+ send_SET("vmem", w->values[PDF_VMSIZE]);
send_END();
-#endif
- send_BEGIN(type, w->clean_name, "mem_page_faults", dt);
- send_SET("minor", (kernel_uint_t)(w->minflt * minflt_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cminflt * cminflt_fix_ratio)) : 0ULL));
- send_SET("major", (kernel_uint_t)(w->majflt * majflt_fix_ratio) + (include_exited_childs ? ((kernel_uint_t)(w->cmajflt * cmajflt_fix_ratio)) : 0ULL));
+ send_BEGIN(type, string2str(w->clean_name), "mem_page_faults", dt);
+ send_SET("minor", (kernel_uint_t)(w->values[PDF_MINFLT] * minflt_fix_ratio)
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ + (include_exited_childs ? ((kernel_uint_t)(w->values[PDF_CMINFLT] * cminflt_fix_ratio)) : 0ULL)
+#endif
+ );
+#if (PROCESSES_HAVE_MAJFLT == 1)
+ send_SET("major", (kernel_uint_t)(w->values[PDF_MAJFLT] * majflt_fix_ratio)
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ + (include_exited_childs ? ((kernel_uint_t)(w->values[PDF_CMAJFLT] * cmajflt_fix_ratio)) : 0ULL)
+#endif
+ );
+#endif
send_END();
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- send_BEGIN(type, w->clean_name, "swap_usage", dt);
- send_SET("swap", w->status_vmswap);
+#if (PROCESSES_HAVE_VMSWAP == 1)
+ send_BEGIN(type, string2str(w->clean_name), "swap_usage", dt);
+ send_SET("swap", w->values[PDF_VMSWAP]);
send_END();
#endif
- if (w->processes == 0) {
- send_BEGIN(type, w->clean_name, "uptime", dt);
- send_SET("uptime", 0);
- send_END();
+ send_BEGIN(type, string2str(w->clean_name), "uptime", dt);
+ send_SET("uptime", w->uptime_max);
+ send_END();
- if (enable_detailed_uptime_charts) {
- send_BEGIN(type, w->clean_name, "uptime_summary", dt);
- send_SET("min", 0);
- send_SET("avg", 0);
- send_SET("max", 0);
- send_END();
- }
- } else {
- send_BEGIN(type, w->clean_name, "uptime", dt);
- send_SET("uptime", w->uptime_max);
+ if (enable_detailed_uptime_charts) {
+ send_BEGIN(type, string2str(w->clean_name), "uptime_summary", dt);
+ send_SET("min", w->uptime_min);
+ send_SET("avg", w->values[PDF_PROCESSES] > 0 ? w->values[PDF_UPTIME] / w->values[PDF_PROCESSES] : 0);
+ send_SET("max", w->uptime_max);
send_END();
-
- if (enable_detailed_uptime_charts) {
- send_BEGIN(type, w->clean_name, "uptime_summary", dt);
- send_SET("min", w->uptime_min);
- send_SET("avg", w->processes > 0 ? w->uptime_sum / w->processes : 0);
- send_SET("max", w->uptime_max);
- send_END();
- }
}
- send_BEGIN(type, w->clean_name, "disk_physical_io", dt);
- send_SET("reads", w->io_storage_bytes_read);
- send_SET("writes", w->io_storage_bytes_written);
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
+ send_BEGIN(type, string2str(w->clean_name), "disk_physical_io", dt);
+ send_SET("reads", w->values[PDF_PREAD]);
+ send_SET("writes", w->values[PDF_PWRITE]);
send_END();
+#endif
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- send_BEGIN(type, w->clean_name, "disk_logical_io", dt);
- send_SET("reads", w->io_logical_bytes_read);
- send_SET("writes", w->io_logical_bytes_written);
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
+ send_BEGIN(type, string2str(w->clean_name), "disk_logical_io", dt);
+ send_SET("reads", w->values[PDF_LREAD]);
+ send_SET("writes", w->values[PDF_LWRITE]);
send_END();
#endif
+
if (enable_file_charts) {
- send_BEGIN(type, w->clean_name, "fds_open_limit", dt);
+#if (PROCESSES_HAVE_FDS == 1)
+ send_BEGIN(type, string2str(w->clean_name), "fds_open_limit", dt);
send_SET("limit", w->max_open_files_percent * 100.0);
send_END();
+#endif
- send_BEGIN(type, w->clean_name, "fds_open", dt);
+ send_BEGIN(type, string2str(w->clean_name), "fds_open", dt);
+#if (PROCESSES_HAVE_FDS == 1)
send_SET("files", w->openfds.files);
send_SET("sockets", w->openfds.sockets);
send_SET("pipes", w->openfds.sockets);
@@ -209,6 +230,10 @@ void send_collected_data_to_netdata(struct target *root, const char *type, usec_
send_SET("signal", w->openfds.signalfds);
send_SET("eventpolls", w->openfds.eventpolls);
send_SET("other", w->openfds.other);
+#endif
+#if (PROCESSES_HAVE_HANDLES == 1)
+ send_SET("handles", w->values[PDF_HANDLES]);
+#endif
send_END();
}
}
@@ -218,137 +243,166 @@ void send_collected_data_to_netdata(struct target *root, const char *type, usec_
// ----------------------------------------------------------------------------
// generate the charts
+static void send_file_charts_to_netdata(struct target *w, const char *type, const char *lbl_name, const char *title, bool obsolete) {
+#if (PROCESSES_HAVE_FDS == 1)
+ fprintf(stdout, "CHART %s.%s_fds_open_limit '' '%s open file descriptors limit' '%%' fds %s.fds_open_limit line 20200 %d %s\n",
+ type, string2str(w->clean_name), title, type, update_every, obsolete ? "obsolete" : "");
+
+ if(!obsolete) {
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
+ fprintf(stdout, "CLABEL_COMMIT\n");
+ fprintf(stdout, "DIMENSION limit '' absolute 1 100\n");
+ }
+#endif
+
+#if (PROCESSES_HAVE_FDS == 1) || (PROCESSES_HAVE_HANDLES == 1)
+ fprintf(stdout, "CHART %s.%s_fds_open '' '%s open files descriptors' 'fds' fds %s.fds_open stacked 20210 %d %s\n",
+ type, string2str(w->clean_name), title, type, update_every, obsolete ? "obsolete" : "");
+
+ if(!obsolete) {
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
+ fprintf(stdout, "CLABEL_COMMIT\n");
+#if (PROCESSES_HAVE_FDS == 1)
+ fprintf(stdout, "DIMENSION files '' absolute 1 1\n");
+ fprintf(stdout, "DIMENSION sockets '' absolute 1 1\n");
+ fprintf(stdout, "DIMENSION pipes '' absolute 1 1\n");
+ fprintf(stdout, "DIMENSION inotifies '' absolute 1 1\n");
+ fprintf(stdout, "DIMENSION event '' absolute 1 1\n");
+ fprintf(stdout, "DIMENSION timer '' absolute 1 1\n");
+ fprintf(stdout, "DIMENSION signal '' absolute 1 1\n");
+ fprintf(stdout, "DIMENSION eventpolls '' absolute 1 1\n");
+ fprintf(stdout, "DIMENSION other '' absolute 1 1\n");
+#endif // PROCESSES_HAVE_FDS
+#if (PROCESSES_HAVE_HANDLES == 1)
+ fprintf(stdout, "DIMENSION handles '' absolute 1 1\n");
+#endif // PROCESSES_HAVE_HANDLES
+ }
+#endif // PROCESSES_HAVE_FDS || PROCESSES_HAVE_HANDLES
+}
+
void send_charts_updates_to_netdata(struct target *root, const char *type, const char *lbl_name, const char *title) {
struct target *w;
- if (debug_enabled) {
- for (w = root; w; w = w->next) {
- if (unlikely(!w->target && w->processes)) {
- struct pid_on_target *pid_on_target;
- fprintf(stderr, "apps.plugin: target '%s' has aggregated %u process(es):", w->name, w->processes);
- for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
- fprintf(stderr, " %d", pid_on_target->pid);
- }
- fputc('\n', stderr);
- }
- }
- }
+ bool disable_file_charts_on_this_run = obsolete_file_charts;
+ obsolete_file_charts = false;
for (w = root; w; w = w->next) {
- if (likely(w->exposed || (!w->processes && !w->is_other)))
+ if (likely(w->exposed || (!w->values[PDF_PROCESSES]))) {
+ if(w->exposed && disable_file_charts_on_this_run)
+ send_file_charts_to_netdata(w, type, lbl_name, title, true);
continue;
+ }
- w->exposed = 1;
+ w->exposed = true;
- fprintf(stdout, "CHART %s.%s_cpu_utilization '' '%s CPU utilization (100%% = 1 core)' 'percentage' cpu %s.cpu_utilization stacked 20001 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_cpu_utilization '' '%s CPU utilization (100%% = 1 core)' 'percentage' cpu %s.cpu_utilization stacked 20001 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION user '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU);
- fprintf(stdout, "DIMENSION system '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU);
+ fprintf(stdout, "DIMENSION user '' absolute 1 %llu\n", NSEC_PER_SEC / 100ULL);
+ fprintf(stdout, "DIMENSION system '' absolute 1 %llu\n", NSEC_PER_SEC / 100ULL);
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
if (enable_guest_charts) {
- fprintf(stdout, "CHART %s.%s_cpu_guest_utilization '' '%s CPU guest utlization (100%% = 1 core)' 'percentage' cpu %s.cpu_guest_utilization line 20005 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_cpu_guest_utilization '' '%s CPU guest utlization (100%% = 1 core)' 'percentage' cpu %s.cpu_guest_utilization line 20005 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION guest '' absolute 1 %llu\n", time_factor * RATES_DETAIL / 100LLU);
+ fprintf(stdout, "DIMENSION guest '' absolute 1 %llu\n", NSEC_PER_SEC / 100ULL);
}
+#endif
- fprintf(stdout, "CHART %s.%s_cpu_context_switches '' '%s CPU context switches' 'switches/s' cpu %s.cpu_context_switches stacked 20010 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_mem_private_usage '' '%s memory usage without shared' 'MiB' mem %s.mem_private_usage area 20050 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION voluntary '' absolute 1 %llu\n", RATES_DETAIL);
- fprintf(stdout, "DIMENSION involuntary '' absolute 1 %llu\n", RATES_DETAIL);
+ fprintf(stdout, "DIMENSION mem '' absolute %ld %ld\n", 1L, 1024L * 1024L);
- fprintf(stdout, "CHART %s.%s_mem_private_usage '' '%s memory usage without shared' 'MiB' mem %s.mem_private_usage area 20050 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+#if (PROCESSES_HAVE_VOLCTX == 1) || (PROCESSES_HAVE_NVOLCTX == 1)
+ fprintf(stdout, "CHART %s.%s_cpu_context_switches '' '%s CPU context switches' 'switches/s' cpu %s.cpu_context_switches stacked 20010 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION mem '' absolute %ld %ld\n", 1L, 1024L);
+#if (PROCESSES_HAVE_VOLCTX == 1)
+ fprintf(stdout, "DIMENSION voluntary '' absolute 1 %llu\n", RATES_DETAIL);
+#endif
+#if (PROCESSES_HAVE_NVOLCTX == 1)
+ fprintf(stdout, "DIMENSION involuntary '' absolute 1 %llu\n", RATES_DETAIL);
+#endif
#endif
- fprintf(stdout, "CHART %s.%s_mem_usage '' '%s memory RSS usage' 'MiB' mem %s.mem_usage area 20055 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_mem_usage '' '%s memory RSS usage' 'MiB' mem %s.mem_usage area 20055 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION rss '' absolute %ld %ld\n", 1L, 1024L);
+ fprintf(stdout, "DIMENSION rss '' absolute %ld %ld\n", 1L, 1024L * 1024L);
-#if !defined(__APPLE__)
- fprintf(stdout, "CHART %s.%s_vmem_usage '' '%s virtual memory size' 'MiB' mem %s.vmem_usage line 20065 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_vmem_usage '' '%s virtual memory size' 'MiB' mem %s.vmem_usage line 20065 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION vmem '' absolute %ld %ld\n", 1L, 1024L);
-#endif
+ fprintf(stdout, "DIMENSION vmem '' absolute %ld %ld\n", 1L, 1024L * 1024L);
- fprintf(stdout, "CHART %s.%s_mem_page_faults '' '%s memory page faults' 'pgfaults/s' mem %s.mem_page_faults stacked 20060 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_mem_page_faults '' '%s memory page faults' 'pgfaults/s' mem %s.mem_page_faults stacked 20060 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION major '' absolute 1 %llu\n", RATES_DETAIL);
fprintf(stdout, "DIMENSION minor '' absolute 1 %llu\n", RATES_DETAIL);
+#if (PROCESSES_HAVE_MAJFLT == 1)
+ fprintf(stdout, "DIMENSION major '' absolute 1 %llu\n", RATES_DETAIL);
+#endif
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- fprintf(stdout, "CHART %s.%s_swap_usage '' '%s swap usage' 'MiB' mem %s.swap_usage area 20065 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+#if (PROCESSES_HAVE_VMSWAP == 1)
+ fprintf(stdout, "CHART %s.%s_swap_usage '' '%s swap usage' 'MiB' mem %s.swap_usage area 20065 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION swap '' absolute %ld %ld\n", 1L, 1024L);
+ fprintf(stdout, "DIMENSION swap '' absolute %ld %ld\n", 1L, 1024L * 1024L);
#endif
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'KiB/s' disk %s.disk_physical_io area 20100 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
+ fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'KiB/s' disk %s.disk_physical_io area 20100 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", 1024LLU * RATES_DETAIL);
fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", 1024LLU * RATES_DETAIL);
+#endif
- fprintf(stdout, "CHART %s.%s_disk_logical_io '' '%s disk logical IO' 'KiB/s' disk %s.disk_logical_io area 20105 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
+ fprintf(stdout, "CHART %s.%s_disk_logical_io '' '%s disk logical IO' 'KiB/s' disk %s.disk_logical_io area 20105 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", 1024LLU * RATES_DETAIL);
fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", 1024LLU * RATES_DETAIL);
-#else
- fprintf(stdout, "CHART %s.%s_disk_physical_io '' '%s disk physical IO' 'blocks/s' disk %s.disk_physical_block_io area 20100 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION reads '' absolute 1 %llu\n", RATES_DETAIL);
- fprintf(stdout, "DIMENSION writes '' absolute -1 %llu\n", RATES_DETAIL);
#endif
- fprintf(stdout, "CHART %s.%s_processes '' '%s processes' 'processes' processes %s.processes line 20150 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_processes '' '%s processes' 'processes' processes %s.processes line 20150 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
fprintf(stdout, "DIMENSION processes '' absolute 1 1\n");
- fprintf(stdout, "CHART %s.%s_threads '' '%s threads' 'threads' processes %s.threads line 20155 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_threads '' '%s threads' 'threads' processes %s.threads line 20155 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
fprintf(stdout, "DIMENSION threads '' absolute 1 1\n");
- if (enable_file_charts) {
- fprintf(stdout, "CHART %s.%s_fds_open_limit '' '%s open file descriptors limit' '%%' fds %s.fds_open_limit line 20200 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION limit '' absolute 1 100\n");
+ if (enable_file_charts)
+ send_file_charts_to_netdata(w, type, lbl_name, title, false);
- fprintf(stdout, "CHART %s.%s_fds_open '' '%s open files descriptors' 'fds' fds %s.fds_open stacked 20210 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
- fprintf(stdout, "CLABEL_COMMIT\n");
- fprintf(stdout, "DIMENSION files '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION sockets '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION pipes '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION inotifies '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION event '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION timer '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION signal '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION eventpolls '' absolute 1 1\n");
- fprintf(stdout, "DIMENSION other '' absolute 1 1\n");
- }
-
- fprintf(stdout, "CHART %s.%s_uptime '' '%s uptime' 'seconds' uptime %s.uptime line 20250 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_uptime '' '%s uptime' 'seconds' uptime %s.uptime line 20250 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
fprintf(stdout, "DIMENSION uptime '' absolute 1 1\n");
if (enable_detailed_uptime_charts) {
- fprintf(stdout, "CHART %s.%s_uptime_summary '' '%s uptime summary' 'seconds' uptime %s.uptime_summary area 20255 %d\n", type, w->clean_name, title, type, update_every);
- fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, w->name);
+ fprintf(stdout, "CHART %s.%s_uptime_summary '' '%s uptime summary' 'seconds' uptime %s.uptime_summary area 20255 %d\n",
+ type, string2str(w->clean_name), title, type, update_every);
+ fprintf(stdout, "CLABEL '%s' '%s' 1\n", lbl_name, string2str(w->name));
fprintf(stdout, "CLABEL_COMMIT\n");
fprintf(stdout, "DIMENSION min '' absolute 1 1\n");
fprintf(stdout, "DIMENSION avg '' absolute 1 1\n");
@@ -357,8 +411,8 @@ void send_charts_updates_to_netdata(struct target *root, const char *type, const
}
}
+#if (PROCESSES_HAVE_STATE == 1)
void send_proc_states_count(usec_t dt __maybe_unused) {
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
static bool chart_added = false;
// create chart for count of processes in different states
if (!chart_added) {
@@ -379,6 +433,6 @@ void send_proc_states_count(usec_t dt __maybe_unused) {
send_SET(proc_states[i], proc_state_count[i]);
}
send_END();
-#endif
}
+#endif
diff --git a/src/collectors/apps.plugin/apps_pid.c b/src/collectors/apps.plugin/apps_pid.c
new file mode 100644
index 000000000..0dcee7cce
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_pid.c
@@ -0,0 +1,927 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+static inline void link_pid_to_its_parent(struct pid_stat *p);
+
+// --------------------------------------------------------------------------------------------------------------------
+// The index of all pids
+
+#define SIMPLE_HASHTABLE_NAME _PID
+#define SIMPLE_HASHTABLE_VALUE_TYPE struct pid_stat
+#define SIMPLE_HASHTABLE_KEY_TYPE int32_t
+#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION pid_stat_to_pid_ptr
+#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION pid_ptr_eq
+#define SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION 0
+#include "libnetdata/simple_hashtable/simple_hashtable.h"
+
+static inline int32_t *pid_stat_to_pid_ptr(struct pid_stat *p) {
+ return &p->pid;
+}
+
+static inline bool pid_ptr_eq(int32_t *a, int32_t *b) {
+ return *a == *b;
+}
+
+struct {
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ // Another pre-allocated list of all possible pids.
+ // We need it to assign them a unique sortlist id, so that we
+ // read parents before children. This is needed to prevent a situation where
+ // a child is found running, but until we read its parent, it has exited and
+ // its parent has accumulated its resources.
+ struct {
+ size_t size;
+ struct pid_stat **array;
+ } sorted;
+#endif
+
+ struct {
+ size_t count; // the number of processes running
+ struct pid_stat *root;
+ SIMPLE_HASHTABLE_PID ht;
+ ARAL *aral;
+ } all_pids;
+} pids = { 0 };
+
+struct pid_stat *root_of_pids(void) {
+ return pids.all_pids.root;
+}
+
+size_t all_pids_count(void) {
+ return pids.all_pids.count;
+}
+
+void apps_pids_init(void) {
+ pids.all_pids.aral = aral_create("pid_stat", sizeof(struct pid_stat), 1, 65536, NULL, NULL, NULL, false, true);
+ simple_hashtable_init_PID(&pids.all_pids.ht, 1024);
+}
+
+static inline uint64_t pid_hash(pid_t pid) {
+ return XXH3_64bits(&pid, sizeof(pid));
+}
+
+inline struct pid_stat *find_pid_entry(pid_t pid) {
+ if(pid < INIT_PID) return NULL;
+
+ uint64_t hash = pid_hash(pid);
+ int32_t key = pid;
+ SIMPLE_HASHTABLE_SLOT_PID *sl = simple_hashtable_get_slot_PID(&pids.all_pids.ht, hash, &key, true);
+ return(SIMPLE_HASHTABLE_SLOT_DATA(sl));
+}
+
+struct pid_stat *get_or_allocate_pid_entry(pid_t pid) {
+ uint64_t hash = pid_hash(pid);
+ int32_t key = pid;
+ SIMPLE_HASHTABLE_SLOT_PID *sl = simple_hashtable_get_slot_PID(&pids.all_pids.ht, hash, &key, true);
+ struct pid_stat *p = SIMPLE_HASHTABLE_SLOT_DATA(sl);
+ if(likely(p))
+ return p;
+
+ p = aral_callocz(pids.all_pids.aral);
+
+#if (PROCESSES_HAVE_FDS == 1)
+ p->fds = mallocz(sizeof(struct pid_fd) * 3); // stdin, stdout, stderr
+ p->fds_size = 3;
+ init_pid_fds(p, 0, p->fds_size);
+#endif
+
+ p->pid = pid;
+ p->values[PDF_PROCESSES] = 1;
+
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(pids.all_pids.root, p, prev, next);
+ simple_hashtable_set_slot_PID(&pids.all_pids.ht, sl, hash, p);
+ pids.all_pids.count++;
+
+ return p;
+}
+
+void del_pid_entry(pid_t pid) {
+ uint64_t hash = pid_hash(pid);
+ int32_t key = pid;
+ SIMPLE_HASHTABLE_SLOT_PID *sl = simple_hashtable_get_slot_PID(&pids.all_pids.ht, hash, &key, true);
+ struct pid_stat *p = SIMPLE_HASHTABLE_SLOT_DATA(sl);
+
+ if(unlikely(!p)) {
+ netdata_log_error("attempted to free pid %d that is not allocated.", pid);
+ return;
+ }
+
+ debug_log("process %d %s exited, deleting it.", pid, pid_stat_comm(p));
+
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(pids.all_pids.root, p, prev, next);
+ simple_hashtable_del_slot_PID(&pids.all_pids.ht, sl);
+
+#if defined(OS_LINUX)
+ {
+ size_t i;
+ for(i = 0; i < p->fds_size; i++)
+ if(p->fds[i].filename)
+ freez(p->fds[i].filename);
+ }
+
+ arl_free(p->status_arl);
+
+ freez(p->fds_dirname);
+ freez(p->stat_filename);
+ freez(p->status_filename);
+ freez(p->limits_filename);
+ freez(p->io_filename);
+ freez(p->cmdline_filename);
+#endif
+
+#if (PROCESSES_HAVE_FDS == 1)
+ freez(p->fds);
+#endif
+
+#if (PROCESSES_HAVE_SID == 1)
+ string_freez(p->sid_name);
+#endif
+
+ string_freez(p->comm_orig);
+ string_freez(p->comm);
+ string_freez(p->cmdline);
+ aral_freez(pids.all_pids.aral, p);
+
+ pids.all_pids.count--;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+static __thread pid_t current_pid;
+static __thread kernel_uint_t current_pid_values[PDF_MAX];
+
+void pid_collection_started(struct pid_stat *p) {
+ fatal_assert(sizeof(current_pid_values) == sizeof(p->values));
+ current_pid = p->pid;
+ memcpy(current_pid_values, p->values, sizeof(current_pid_values));
+ memset(p->values, 0, sizeof(p->values));
+ p->values[PDF_PROCESSES] = 1;
+ p->read = true;
+}
+
+void pid_collection_failed(struct pid_stat *p) {
+ fatal_assert(current_pid == p->pid);
+ fatal_assert(sizeof(current_pid_values) == sizeof(p->values));
+ memcpy(p->values, current_pid_values, sizeof(p->values));
+}
+
+void pid_collection_completed(struct pid_stat *p) {
+ p->updated = true;
+ p->keep = false;
+ p->keeploops = 0;
+}
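+// Usage pattern (illustrative): a collector snapshots the previous values with
+// pid_collection_started(p), fills p->values[] for this iteration, and then calls
+// pid_collection_completed(p) on success, or pid_collection_failed(p) to restore the
+// previous values when a counter could not be read.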
+
+// --------------------------------------------------------------------------------------------------------------------
+// preloading of parents before their children
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+static inline size_t compute_new_sorted_size(size_t old_size, size_t required_size) {
+ size_t size = (required_size % 1024 == 0) ? required_size : required_size + 1024;
+ size = (size / 1024) * 1024;
+
+ if(size < old_size * 2)
+ size = old_size * 2;
+
+ return size;
+}
+
+static int compar_pid_sortlist(const void *a, const void *b) {
+ const struct pid_stat *p1 = *(struct pid_stat **)a;
+ const struct pid_stat *p2 = *(struct pid_stat **)b;
+
+ if(p1->sortlist > p2->sortlist)
+ return -1;
+ else
+ return 1;
+}
+
+bool collect_parents_before_children(void) {
+ if (!pids.all_pids.count) return false;
+
+ if (pids.all_pids.count > pids.sorted.size) {
+ size_t new_size = compute_new_sorted_size(pids.sorted.size, pids.all_pids.count);
+ freez(pids.sorted.array);
+ pids.sorted.array = mallocz(new_size * sizeof(struct pid_stat *));
+ pids.sorted.size = new_size;
+ }
+
+ size_t slc = 0;
+ struct pid_stat *p = NULL;
+ uint32_t sortlist = 1;
+ for (p = root_of_pids(); p && slc < pids.sorted.size; p = p->next) {
+ pids.sorted.array[slc++] = p;
+
+ // assign a sortlist id to it and all of its parents
+ for (struct pid_stat *pp = p; pp ; pp = pp->parent)
+ pp->sortlist = sortlist++;
+ }
+ size_t sorted = slc;
+
+ static bool logged = false;
+ if (unlikely(p && !logged)) {
+ nd_log(
+ NDLS_COLLECTORS,
+ NDLP_ERR,
+ "Internal error: I was thinking I had %zu processes in my arrays, but it seems there are more.",
+ pids.all_pids.count);
+ logged = true;
+ }
+
+ if (include_exited_childs && sorted) {
+ // Read parents before children
+ // This is needed to prevent a situation where
+ // a child is found running, but until we read
+ // its parent, it has exited and its parent
+ // has accumulated its resources.
+
+ qsort((void *)pids.sorted.array, sorted, sizeof(struct pid_stat *), compar_pid_sortlist);
+
+ // we read all running processes, parents first;
+ // incrementally_collect_data_for_pid_stat() is smart enough
+ // not to read the same pid twice per iteration
+ for (slc = 0; slc < sorted; slc++) {
+ p = pids.sorted.array[slc];
+ incrementally_collect_data_for_pid_stat(p, NULL);
+ }
+ }
+
+ return true;
+}
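+// Ordering sketch (illustrative): assuming a chain init(1) -> shell(2) -> app(3), walking
+// the pid list assigns increasing sortlist values up each parent chain, so every ancestor
+// ends up with a higher sortlist than its descendants; sorting in descending sortlist
+// order therefore yields init, shell, app - parents are always collected before children.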
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+
+static void log_parent_loop(struct pid_stat *p) {
+ CLEAN_BUFFER *wb = buffer_create(0, NULL);
+ buffer_sprintf(wb, "original pid %d (%s)", p->pid, string2str(p->comm));
+
+ size_t loops = 0;
+ for(struct pid_stat *t = p->parent; t && loops < 2 ;t = t->parent) {
+ buffer_sprintf(wb, " => %d (%s)", t->pid, string2str(t->comm));
+ if(t == p->parent) loops++;
+ }
+
+ buffer_sprintf(wb, " : broke loop at %d (%s)", p->pid, string2str(p->comm));
+
+ errno_clear();
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Parents loop detected: %s", buffer_tostring(wb));
+}
+
+static inline bool is_already_a_parent(struct pid_stat *p, struct pid_stat *pp) {
+ for(struct pid_stat *t = pp; t ;t = t->parent)
+ if(t == p) return true;
+
+ return false;
+}
+
+static inline void link_pid_to_its_parent(struct pid_stat *p) {
+ p->parent = NULL;
+ if(unlikely(!p->ppid))
+ return;
+
+ if(unlikely(p->ppid == p->pid)) {
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING,
+ "Process %d (%s) states parent %d, which is the same PID. Ignoring it.",
+ p->pid, string2str(p->comm), p->ppid);
+ p->ppid = 0;
+ return;
+ }
+
+ struct pid_stat *pp = find_pid_entry(p->ppid);
+ if(likely(pp)) {
+ fatal_assert(pp->pid == p->ppid);
+
+ if(!is_already_a_parent(p, pp)) {
+ p->parent = pp;
+ pp->children_count++;
+ }
+ else {
+ p->parent = pp;
+ log_parent_loop(p);
+ p->parent = NULL;
+ p->ppid = 0;
+ }
+ }
+#if (PPID_SHOULD_BE_RUNNING == 1)
+ else {
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING,
+ "pid %d %s states parent %d, but the later does not exist.",
+ p->pid, pid_stat_comm(p), p->ppid);
+ }
+#endif
+}
+
+static inline void link_all_processes_to_their_parents(void) {
+ // link all children to their parents
+ // and update children count on parents
+ for(struct pid_stat *p = root_of_pids(); p ; p = p->next)
+ link_pid_to_its_parent(p);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+static bool is_filename(const char *s) {
+ if(!s || !*s) return false;
+
+#if defined(OS_WINDOWS)
+ if( (isalpha((uint8_t)*s) && s[1] == ':' && s[2] == '\\') || // windows native "x:\"
+ (isalpha((uint8_t)*s) && s[1] == ':' && s[2] == '/') || // windows native "x:/"
+ (*s == '\\' && s[1] == '\\' && isalpha((uint8_t)s[2]) && s[3] == '\\') || // windows native "\\x\"
+ (*s == '/' && s[1] == '/' && isalpha((uint8_t)s[2]) && s[3] == '/')) { // windows native "//x/"
+
+ WCHAR ws[FILENAME_MAX];
+ if(utf8_to_utf16(ws, _countof(ws), s, -1) > 0) {
+ DWORD attributes = GetFileAttributesW(ws);
+ if (attributes != INVALID_FILE_ATTRIBUTES)
+ return true;
+ }
+ }
+#endif
+
+ // for: sh -c "exec /path/to/command parameters"
+ if(strncmp(s, "exec ", 5) == 0 && s[5]) {
+ s += 5;
+ char look_for = ' ';
+ if(*s == '\'') { look_for = '\''; s++; }
+ if(*s == '"') { look_for = '"'; s++; }
+ char *end = strchr(s, look_for);
+ if(end) *end = '\0';
+ }
+
+ // linux, freebsd, macos, msys, cygwin
+ if(*s == '/') {
+ struct statvfs stat;
+ return statvfs(s, &stat) == 0;
+ }
+
+ return false;
+}
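+// Examples (illustrative, assuming the paths exist so statvfs() succeeds):
+//   "exec '/usr/bin/python3' app.py"  -> checks "/usr/bin/python3"  -> true
+//   "/usr/sbin/nginx"                 -> checked directly           -> true
+//   "nginx -g daemon off;"            -> no leading '/'             -> false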
+
+static const char *extensions_to_strip[] = {
+ ".sh", // shell scripts
+ ".py", // python scripts
+ ".pl", // perl scripts
+ ".js", // node.js
+#if defined(OS_WINDOWS)
+ ".exe",
+#endif
+ NULL,
+};
+
+// strip extensions we don't want to show
+static void remove_extension(char *name) {
+ size_t name_len = strlen(name);
+ for(size_t i = 0; extensions_to_strip[i] != NULL; i++) {
+ const char *ext = extensions_to_strip[i];
+ size_t ext_len = strlen(ext);
+ if(name_len > ext_len) {
+ char *check = &name[name_len - ext_len];
+ if(strcmp(check, ext) == 0) {
+ *check = '\0';
+ break;
+ }
+ }
+ }
+}
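+// Examples (illustrative): "backup.sh" -> "backup", "collector.py" -> "collector";
+// only the first matching extension is stripped and unknown ones (e.g. ".tar.gz") are kept.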
+
+static inline STRING *comm_from_cmdline_param_sanitized(STRING *cmdline) {
+ if(!cmdline) return NULL;
+
+ char buf[string_strlen(cmdline) + 1];
+ memcpy(buf, string2str(cmdline), sizeof(buf));
+
+ char *words[100];
+ size_t num_words = quoted_strings_splitter_whitespace(buf, words, 100);
+ for(size_t word = 1; word < num_words ;word++) {
+ char *s = words[word];
+ if(is_filename(s)) {
+ char *name = strrchr(s, '/');
+
+#if defined(OS_WINDOWS)
+ if(!name)
+ name = strrchr(s, '\\');
+#endif
+
+ if(name && *name) {
+ name++;
+ remove_extension(name);
+ sanitize_apps_plugin_chart_meta(name);
+ return string_strdupz(name);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static inline STRING *comm_from_cmdline_sanitized(STRING *comm, STRING *cmdline) {
+ if(!cmdline) return NULL;
+
+ char buf[string_strlen(cmdline) + 1];
+ memcpy(buf, string2str(cmdline), sizeof(buf));
+
+ size_t comm_len = string_strlen(comm);
+ char *start = strstr(buf, string2str(comm));
+ while (start) {
+ char *end = start + comm_len;
+ while (*end &&
+ !isspace((uint8_t) *end) &&
+ *end != '/' && // path separator - linux
+ *end != '\\' && // path separator - windows
+ *end != '"' && // closing double quotes
+ *end != '\'' && // closing single quotes
+ *end != ')' && // sometimes processes add ')' at their end
+ *end != ':') // sometimes processes add ':' at their end
+ end++;
+
+ *end = '\0';
+
+ remove_extension(start);
+ sanitize_apps_plugin_chart_meta(start);
+ return string_strdupz(start);
+ }
+
+ return NULL;
+}
+
+static void update_pid_comm_from_cmdline(struct pid_stat *p) {
+ bool updated = false;
+
+ STRING *new_comm = comm_from_cmdline_sanitized(p->comm, p->cmdline);
+ if(new_comm) {
+ string_freez(p->comm);
+ p->comm = new_comm;
+ updated = true;
+ }
+
+ if(is_process_an_interpreter(p)) {
+ new_comm = comm_from_cmdline_param_sanitized(p->cmdline);
+ if(new_comm) {
+ string_freez(p->comm);
+ p->comm = new_comm;
+ updated = true;
+ }
+ }
+
+ if(updated) {
+ p->is_manager = is_process_a_manager(p);
+ p->is_aggregator = is_process_an_aggregator(p);
+ }
+}
+
+void update_pid_cmdline(struct pid_stat *p, const char *cmdline) {
+ string_freez(p->cmdline);
+ p->cmdline = cmdline ? string_strdupz(cmdline) : NULL;
+
+ if(p->cmdline)
+ update_pid_comm_from_cmdline(p);
+}
+
+void update_pid_comm(struct pid_stat *p, const char *comm) {
+ if(p->comm_orig && string_strcmp(p->comm_orig, comm) == 0)
+ // no change
+ return;
+
+ string_freez(p->comm_orig);
+ p->comm_orig = string_strdupz(comm);
+
+ // some process names have ( and ), remove the parenthesis
+ size_t len = strlen(comm);
+ char buf[len + 1];
+ if(comm[0] == '(' && comm[len - 1] == ')') {
+ memcpy(buf, &comm[1], len - 2);
+ buf[len - 2] = '\0';
+ }
+ else
+ memcpy(buf, comm, sizeof(buf));
+
+ sanitize_apps_plugin_chart_meta(buf);
+ string_freez(p->comm); // release the previous interned name before replacing it
+ p->comm = string_strdupz(buf);
+ p->is_manager = is_process_a_manager(p);
+ p->is_aggregator = is_process_an_aggregator(p);
+
+#if (PROCESSES_HAVE_CMDLINE == 1)
+ if(likely(proc_pid_cmdline_is_needed && !p->cmdline))
+ managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
+#else
+ update_pid_comm_from_cmdline(p);
+#endif
+
+ // the process changed comm, we may have to reassign it to
+ // an apps_groups.conf target.
+ p->target = NULL;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) || (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+//static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t time) {
+// char *prefix = "\\_ ";
+// int indent = 0;
+//
+// if(p->parent)
+// indent = debug_print_process_and_parents(p->parent, p->stat_collected_usec);
+// else
+// prefix = " > ";
+//
+// char buffer[indent + 1];
+// int i;
+//
+// for(i = 0; i < indent ;i++) buffer[i] = ' ';
+// buffer[i] = '\0';
+//
+// fprintf(stderr, " %s %s%s (%d %s %"PRIu64""
+// , buffer
+// , prefix
+// , pid_stat_comm(p)
+// , p->pid
+// , p->updated?"running":"exited"
+// , p->stat_collected_usec - time
+// );
+//
+// if(p->values[PDF_UTIME]) fprintf(stderr, " utime=" KERNEL_UINT_FORMAT, p->values[PDF_UTIME]);
+// if(p->values[PDF_STIME]) fprintf(stderr, " stime=" KERNEL_UINT_FORMAT, p->values[PDF_STIME]);
+//#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+// if(p->values[PDF_GTIME]) fprintf(stderr, " gtime=" KERNEL_UINT_FORMAT, p->values[PDF_GTIME]);
+//#endif
+//#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
+// if(p->values[PDF_CUTIME]) fprintf(stderr, " cutime=" KERNEL_UINT_FORMAT, p->values[PDF_CUTIME]);
+// if(p->values[PDF_CSTIME]) fprintf(stderr, " cstime=" KERNEL_UINT_FORMAT, p->values[PDF_CSTIME]);
+//#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+// if(p->values[PDF_CGTIME]) fprintf(stderr, " cgtime=" KERNEL_UINT_FORMAT, p->values[PDF_CGTIME]);
+//#endif
+//#endif
+// if(p->values[PDF_MINFLT]) fprintf(stderr, " minflt=" KERNEL_UINT_FORMAT, p->values[PDF_MINFLT]);
+//#if (PROCESSES_HAVE_MAJFLT == 1)
+// if(p->values[PDF_MAJFLT]) fprintf(stderr, " majflt=" KERNEL_UINT_FORMAT, p->values[PDF_MAJFLT]);
+//#endif
+//#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+// if(p->values[PDF_CMINFLT]) fprintf(stderr, " cminflt=" KERNEL_UINT_FORMAT, p->values[PDF_CMINFLT]);
+// if(p->values[PDF_CMAJFLT]) fprintf(stderr, " cmajflt=" KERNEL_UINT_FORMAT, p->values[PDF_CMAJFLT]);
+//#endif
+// fprintf(stderr, ")\n");
+//
+// return indent + 1;
+//}
+//
+//static inline void debug_print_process_tree(struct pid_stat *p, char *msg __maybe_unused) {
+// debug_log("%s: process %s (%d, %s) with parents:", msg, pid_stat_comm(p), p->pid, p->updated?"running":"exited");
+// debug_print_process_and_parents(p, p->stat_collected_usec);
+//}
+//
+//static inline void debug_find_lost_child(struct pid_stat *pe, kernel_uint_t lost, int type) {
+// int found = 0;
+// struct pid_stat *p = NULL;
+//
+// for(p = root_of_pids(); p ; p = p->next) {
+// if(p == pe) continue;
+//
+// switch(type) {
+// case 1:
+//#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+// if(p->values[PDF_CMINFLT] > lost) {
+// fprintf(stderr, " > process %d (%s) could use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe));
+// found++;
+// }
+//#endif
+// break;
+//
+// case 2:
+//#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+// if(p->values[PDF_CMAJFLT] > lost) {
+// fprintf(stderr, " > process %d (%s) could use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe));
+// found++;
+// }
+//#endif
+// break;
+//
+// case 3:
+//#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
+// if(p->values[PDF_CUTIME] > lost) {
+// fprintf(stderr, " > process %d (%s) could use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe));
+// found++;
+// }
+//#endif
+// break;
+//
+// case 4:
+//#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
+// if(p->values[PDF_CSTIME] > lost) {
+// fprintf(stderr, " > process %d (%s) could use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe));
+// found++;
+// }
+//#endif
+// break;
+//
+// case 5:
+//#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) && (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+// if(p->values[PDF_CGTIME] > lost) {
+// fprintf(stderr, " > process %d (%s) could use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// p->pid, pid_stat_comm(p), lost, pe->pid, pid_stat_comm(pe));
+// found++;
+// }
+//#endif
+// break;
+// }
+// }
+//
+// if(!found) {
+// switch(type) {
+// case 1:
+// fprintf(stderr, " > cannot find any process to use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// lost, pe->pid, pid_stat_comm(pe));
+// break;
+//
+// case 2:
+// fprintf(stderr, " > cannot find any process to use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// lost, pe->pid, pid_stat_comm(pe));
+// break;
+//
+// case 3:
+// fprintf(stderr, " > cannot find any process to use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// lost, pe->pid, pid_stat_comm(pe));
+// break;
+//
+// case 4:
+// fprintf(stderr, " > cannot find any process to use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// lost, pe->pid, pid_stat_comm(pe));
+// break;
+//
+// case 5:
+// fprintf(stderr, " > cannot find any process to use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n",
+// lost, pe->pid, pid_stat_comm(pe));
+// break;
+// }
+// }
+//}
+
+static inline kernel_uint_t remove_exited_child_from_parent(kernel_uint_t *field, kernel_uint_t *pfield) {
+ kernel_uint_t absorbed = 0;
+
+ if(*field > *pfield) {
+ absorbed += *pfield;
+ *field -= *pfield;
+ *pfield = 0;
+ }
+ else {
+ absorbed += *field;
+ *pfield -= *field;
+ *field = 0;
+ }
+
+ return absorbed;
+}
+
+static inline void process_exited_pids(void) {
+ /*
+ * WHY DO WE NEED THIS?
+ *
+ * When a child process exits in Linux, its accumulated user time (utime) and its children's accumulated
+ * user time (cutime) are added to the parent's cutime. This means the parent process's cutime reflects
+ * the total user time spent by its exited children and their descendants.
+ *
+ * This results in spikes in the charts.
+ * In this function we remove the exited children's resources from the parent's cutime, but only for the
+ * children we have been monitoring and to the degree we have data for them. Since previously running
+ * children have already been reported by us, removing them is the right thing to do.
+ *
+ */
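+ // a rough worked example of the absorption below (numbers are hypothetical):
+ // an exited child may report a remaining total utime of 100 units while its
+ // first running ancestor currently shows values[PDF_CUTIME] = 70; the ancestor
+ // absorbs 70 (its CUTIME is zeroed, so no spike is charted) and the remaining
+ // 30 stays on the exited child with p->keep set, to be matched against the
+ // ancestor again on the next iteration.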
+
+ for(struct pid_stat *p = root_of_pids(); p ; p = p->next) {
+ if(p->updated || !p->stat_collected_usec)
+ continue;
+
+ bool have_work = false;
+
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
+ kernel_uint_t utime = (p->raw[PDF_UTIME] + p->raw[PDF_CUTIME]) * CPU_TO_NANOSECONDCORES;
+ kernel_uint_t stime = (p->raw[PDF_STIME] + p->raw[PDF_CSTIME]) * CPU_TO_NANOSECONDCORES;
+ if(utime + stime) have_work = true;
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+ kernel_uint_t gtime = (p->raw[PDF_GTIME] + p->raw[PDF_CGTIME]) * CPU_TO_NANOSECONDCORES;
+ if(gtime) have_work = true;
+#endif
+#endif
+
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ kernel_uint_t minflt = (p->raw[PDF_MINFLT] + p->raw[PDF_CMINFLT]) * RATES_DETAIL;
+ if(minflt) have_work = true;
+#if (PROCESSES_HAVE_MAJFLT == 1)
+ kernel_uint_t majflt = (p->raw[PDF_MAJFLT] + p->raw[PDF_CMAJFLT]) * RATES_DETAIL;
+ if(majflt) have_work = true;
+#endif
+#endif
+
+ if(!have_work)
+ continue;
+
+// if(unlikely(debug_enabled)) {
+// debug_log("Absorb %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
+// , pid_stat_comm(p)
+// , p->pid
+// , p->updated?"running":"exited"
+// , utime
+// , stime
+// , gtime
+// , minflt
+// , majflt
+// );
+// debug_print_process_tree(p, "Searching parents");
+// }
+
+ for(struct pid_stat *pp = p->parent; pp ; pp = pp->parent) {
+ if(!pp->updated) continue;
+
+ kernel_uint_t absorbed;
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
+ absorbed = remove_exited_child_from_parent(&utime, &pp->values[PDF_CUTIME]);
+// if(unlikely(debug_enabled && absorbed))
+// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " utime (remaining: " KERNEL_UINT_FORMAT ")",
+// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, utime);
+
+ absorbed = remove_exited_child_from_parent(&stime, &pp->values[PDF_CSTIME]);
+// if(unlikely(debug_enabled && absorbed))
+// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " stime (remaining: " KERNEL_UINT_FORMAT ")",
+// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, stime);
+
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+ absorbed = remove_exited_child_from_parent(&gtime, &pp->values[PDF_CGTIME]);
+// if(unlikely(debug_enabled && absorbed))
+// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " gtime (remaining: " KERNEL_UINT_FORMAT ")",
+// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, gtime);
+#endif
+#endif
+
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ absorbed = remove_exited_child_from_parent(&minflt, &pp->values[PDF_CMINFLT]);
+// if(unlikely(debug_enabled && absorbed))
+// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " minflt (remaining: " KERNEL_UINT_FORMAT ")",
+// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, minflt);
+
+#if (PROCESSES_HAVE_MAJFLT == 1)
+ absorbed = remove_exited_child_from_parent(&majflt, &pp->values[PDF_CMAJFLT]);
+// if(unlikely(debug_enabled && absorbed))
+// debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " majflt (remaining: " KERNEL_UINT_FORMAT ")",
+// pid_stat_comm(pp), pp->pid, pp->updated?"running":"exited", absorbed, majflt);
+#endif
+#endif
+
+ (void)absorbed;
+ break;
+ }
+
+// if(unlikely(debug_enabled)) {
+// if(utime) debug_find_lost_child(p, utime, 3);
+// if(stime) debug_find_lost_child(p, stime, 4);
+// if(gtime) debug_find_lost_child(p, gtime, 5);
+// if(minflt) debug_find_lost_child(p, minflt, 1);
+// if(majflt) debug_find_lost_child(p, majflt, 2);
+// }
+
+// debug_log(" > remaining resources - KEEP - for another loop: %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
+// , pid_stat_comm(p)
+// , p->pid
+// , p->updated?"running":"exited"
+// , utime
+// , stime
+// , gtime
+// , minflt
+// , majflt
+// );
+
+ bool done = true;
+
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
+ p->values[PDF_UTIME] = utime / CPU_TO_NANOSECONDCORES;
+ p->values[PDF_STIME] = stime / CPU_TO_NANOSECONDCORES;
+ p->values[PDF_CUTIME] = 0;
+ p->values[PDF_CSTIME] = 0;
+ if(utime + stime) done = false;
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+ p->values[PDF_GTIME] = gtime / CPU_TO_NANOSECONDCORES;
+ p->values[PDF_CGTIME] = 0;
+ if(gtime) done = false;
+#endif
+#endif
+
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ p->values[PDF_MINFLT] = minflt / RATES_DETAIL;
+ p->values[PDF_CMINFLT] = 0;
+ if(minflt) done = false;
+#if (PROCESSES_HAVE_MAJFLT == 1)
+ p->values[PDF_MAJFLT] = majflt / RATES_DETAIL;
+ p->values[PDF_CMAJFLT] = 0;
+ if(majflt) done = false;
+#endif
+#endif
+
+ p->keep = !done;
+
+ if(p->keep) {
+ // we need to keep its exited parents too, to ensure we will have
+ // the information to reach the running parent at the next iteration
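+ // (e.g. if the chain is exited child -> exited parent -> running grandparent,
+ // the exited parent must survive this cleanup so the leftover time can still
+ // be walked up to the grandparent on the next iteration)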
+ for (struct pid_stat *pp = p->parent; pp; pp = pp->parent) {
+ if (pp->updated) break;
+ pp->keep = true;
+ }
+ }
+ }
+}
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+// the main loop for collecting process data
+
+static inline void clear_pid_rates(struct pid_stat *p) {
+ p->values[PDF_UTIME] = 0;
+ p->values[PDF_STIME] = 0;
+
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+ p->values[PDF_GTIME] = 0;
+#endif
+
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
+ p->values[PDF_CUTIME] = 0;
+ p->values[PDF_CSTIME] = 0;
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+ p->values[PDF_CGTIME] = 0;
+#endif
+#endif
+
+ p->values[PDF_MINFLT] = 0;
+#if (PROCESSES_HAVE_MAJFLT == 1)
+ p->values[PDF_MAJFLT] = 0;
+#endif
+
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ p->values[PDF_CMINFLT] = 0;
+ p->values[PDF_CMAJFLT] = 0;
+#endif
+
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
+ p->values[PDF_LREAD] = 0;
+ p->values[PDF_LWRITE] = 0;
+#endif
+
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
+ p->values[PDF_PREAD] = 0;
+ p->values[PDF_PWRITE] = 0;
+#endif
+
+#if (PROCESSES_HAVE_IO_CALLS == 1)
+ p->values[PDF_OREAD] = 0;
+ p->values[PDF_OWRITE] = 0;
+#endif
+
+#if (PROCESSES_HAVE_VOLCTX == 1)
+ p->values[PDF_VOLCTX] = 0;
+#endif
+
+#if (PROCESSES_HAVE_NVOLCTX == 1)
+ p->values[PDF_NVOLCTX] = 0;
+#endif
+}
+
+bool collect_data_for_all_pids(void) {
+ // mark all pids as unread
+#if (INCREMENTAL_DATA_COLLECTION == 0)
+ usec_t now_mon_ut = now_monotonic_usec();
+#endif
+
+ for(struct pid_stat *p = root_of_pids(); p ; p = p->next) {
+ p->read = p->updated = p->merged = false;
+ p->children_count = 0;
+
+#if (INCREMENTAL_DATA_COLLECTION == 0)
+ p->last_stat_collected_usec = p->stat_collected_usec;
+ p->last_io_collected_usec = p->io_collected_usec;
+ p->stat_collected_usec = p->io_collected_usec = now_mon_ut;
+#endif
+ }
+
+ // collect data for all pids
+ if(!OS_FUNCTION(apps_os_collect_all_pids)())
+ return false;
+
+ // build the process tree
+ link_all_processes_to_their_parents();
+
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) || (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ // merge exited pids to their parents
+ process_exited_pids();
+#endif
+
+ // the first iteration needs to be eliminated
+ // since we are looking for rates
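+ // (e.g. on the very first sample there is no previous value to subtract, so a
+ // computed "rate" would really be the absolute counter accumulated since the
+ // process started; clear_pid_rates() zeroes those fields instead of charting
+ // a huge artificial spike)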
+ if(unlikely(global_iterations_counter == 1)) {
+ for(struct pid_stat *p = root_of_pids(); p ; p = p->next)
+ if(p->read) clear_pid_rates(p);
+ }
+
+ return true;
+}
diff --git a/src/collectors/apps.plugin/apps_pid_files.c b/src/collectors/apps.plugin/apps_pid_files.c
new file mode 100644
index 000000000..53e53899c
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_pid_files.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+static uint32_t
+ all_files_len = 0,
+ all_files_size = 0;
+
+uint32_t all_files_len_get(void) {
+ (void)all_files_size;
+ return all_files_len;
+}
+
+#if (PROCESSES_HAVE_FDS == 1)
+// ----------------------------------------------------------------------------
+// file descriptor
+//
+// this is used to keep a global list of all open files of the system.
+// it is needed in order to calculate the unique files processes have open.
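+//
+// e.g. when two processes have the same log file open, both of their pid_fd
+// entries point to the same slot here and the slot's reference count becomes 2;
+// aggregate_pid_fds_on_targets() then counts that file only once per target.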
+
+#define FILE_DESCRIPTORS_INCREASE_STEP 100
+
+// types for struct file_descriptor->type
+typedef enum __attribute__((packed)) fd_filetype {
+ FILETYPE_OTHER,
+ FILETYPE_FILE,
+ FILETYPE_PIPE,
+ FILETYPE_SOCKET,
+ FILETYPE_INOTIFY,
+ FILETYPE_EVENTFD,
+ FILETYPE_EVENTPOLL,
+ FILETYPE_TIMERFD,
+ FILETYPE_SIGNALFD
+} FD_FILETYPE;
+
+struct file_descriptor {
+ avl_t avl;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ uint32_t magic;
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ const char *name;
+ uint32_t hash;
+ uint32_t count;
+ uint32_t pos;
+ FD_FILETYPE type;
+} *all_files = NULL;
+
+// ----------------------------------------------------------------------------
+
+static inline void reallocate_target_fds(struct target *w) {
+ if(unlikely(!w))
+ return;
+
+ if(unlikely(!w->target_fds || w->target_fds_size < all_files_size)) {
+ w->target_fds = reallocz(w->target_fds, sizeof(int) * all_files_size);
+ memset(&w->target_fds[w->target_fds_size], 0, sizeof(int) * (all_files_size - w->target_fds_size));
+ w->target_fds_size = all_files_size;
+ }
+}
+
+static void aggregate_fd_type_on_openfds(FD_FILETYPE type, struct openfds *openfds) {
+ switch(type) {
+ case FILETYPE_SOCKET:
+ openfds->sockets++;
+ break;
+
+ case FILETYPE_FILE:
+ openfds->files++;
+ break;
+
+ case FILETYPE_PIPE:
+ openfds->pipes++;
+ break;
+
+ case FILETYPE_INOTIFY:
+ openfds->inotifies++;
+ break;
+
+ case FILETYPE_EVENTFD:
+ openfds->eventfds++;
+ break;
+
+ case FILETYPE_TIMERFD:
+ openfds->timerfds++;
+ break;
+
+ case FILETYPE_SIGNALFD:
+ openfds->signalfds++;
+ break;
+
+ case FILETYPE_EVENTPOLL:
+ openfds->eventpolls++;
+ break;
+
+ case FILETYPE_OTHER:
+ openfds->other++;
+ break;
+ }
+}
+
+static inline void aggregate_fd_on_target(int fd, struct target *w) {
+ if(unlikely(!w))
+ return;
+
+ if(unlikely(w->target_fds[fd])) {
+ // it is already aggregated
+ // just increase its usage counter
+ w->target_fds[fd]++;
+ return;
+ }
+
+ // increase its usage counter
+ // so that we will not add it again
+ w->target_fds[fd]++;
+
+ aggregate_fd_type_on_openfds(all_files[fd].type, &w->openfds);
+}
+
+void aggregate_pid_fds_on_targets(struct pid_stat *p) {
+ if(enable_file_charts == CONFIG_BOOLEAN_AUTO && all_files_len > MAX_SYSTEM_FD_TO_ALLOW_FILES_PROCESSING) {
+ nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "apps.plugin: the number of system file descriptors is too high (%u), "
+ "disabling file charts. If you want this enabled, add the 'with-files' "
+ "parameter to the [plugin:apps] section of netdata.conf", all_files_len);
+
+ enable_file_charts = CONFIG_BOOLEAN_NO;
+ obsolete_file_charts = true;
+ return;
+ }
+
+ if(unlikely(!p->updated)) {
+ // the process is not running
+ return;
+ }
+
+ struct target
+#if (PROCESSES_HAVE_UID == 1)
+ *u = p->uid_target,
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ *g = p->gid_target,
+#endif
+ *w = p->target;
+
+ reallocate_target_fds(w);
+#if (PROCESSES_HAVE_UID == 1)
+ reallocate_target_fds(u);
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ reallocate_target_fds(g);
+#endif
+
+#if (PROCESSES_HAVE_FDS == 1)
+ p->openfds.files = 0;
+ p->openfds.pipes = 0;
+ p->openfds.sockets = 0;
+ p->openfds.inotifies = 0;
+ p->openfds.eventfds = 0;
+ p->openfds.timerfds = 0;
+ p->openfds.signalfds = 0;
+ p->openfds.eventpolls = 0;
+ p->openfds.other = 0;
+
+ uint32_t c, size = p->fds_size;
+ struct pid_fd *fds = p->fds;
+ for(c = 0; c < size ;c++) {
+ int fd = fds[c].fd;
+
+ if(likely(fd <= 0 || (uint32_t)fd >= all_files_size))
+ continue;
+
+ aggregate_fd_type_on_openfds(all_files[fd].type, &p->openfds);
+
+ aggregate_fd_on_target(fd, w);
+#if (PROCESSES_HAVE_UID == 1)
+ aggregate_fd_on_target(fd, u);
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ aggregate_fd_on_target(fd, g);
+#endif
+ }
+#endif
+}
+
+// ----------------------------------------------------------------------------
+
+int file_descriptor_compare(void* a, void* b) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(((struct file_descriptor *)a)->magic != 0x0BADCAFE || ((struct file_descriptor *)b)->magic != 0x0BADCAFE)
+ netdata_log_error("Corrupted index data detected. Please report this.");
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash)
+ return -1;
+
+ else if(((struct file_descriptor *)a)->hash > ((struct file_descriptor *)b)->hash)
+ return 1;
+
+ else
+ return strcmp(((struct file_descriptor *)a)->name, ((struct file_descriptor *)b)->name);
+}
+
+// int file_descriptor_iterator(avl_t *a) { if(a) {}; return 0; }
+
+avl_tree_type all_files_index = {
+ NULL,
+ file_descriptor_compare
+};
+
+static struct file_descriptor *file_descriptor_find(const char *name, uint32_t hash) {
+ struct file_descriptor tmp;
+ tmp.hash = (hash)?hash:simple_hash(name);
+ tmp.name = name;
+ tmp.count = 0;
+ tmp.pos = 0;
+#ifdef NETDATA_INTERNAL_CHECKS
+ tmp.magic = 0x0BADCAFE;
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ return (struct file_descriptor *)avl_search(&all_files_index, (avl_t *) &tmp);
+}
+
+#define file_descriptor_add(fd) avl_insert(&all_files_index, (avl_t *)(fd))
+#define file_descriptor_remove(fd) avl_remove(&all_files_index, (avl_t *)(fd))
+
+// ----------------------------------------------------------------------------
+
+void file_descriptor_not_used(int id) {
+ if(id > 0 && (uint32_t)id < all_files_size) {
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(all_files[id].magic != 0x0BADCAFE) {
+ netdata_log_error("Ignoring request to remove empty file id %d.", id);
+ return;
+ }
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ debug_log("decreasing slot %d (count = %d).", id, all_files[id].count);
+
+ if(all_files[id].count > 0) {
+ all_files[id].count--;
+
+ if(!all_files[id].count) {
+ debug_log(" >> slot %d is empty.", id);
+
+ if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id]))
+ netdata_log_error("INTERNAL ERROR: removal of unused fd from index, removed a different fd");
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[id].magic = 0x00000000;
+#endif /* NETDATA_INTERNAL_CHECKS */
+ all_files_len--;
+ }
+ }
+ else
+ netdata_log_error("Request to decrease counter of fd %d (%s), while the use counter is 0",
+ id, all_files[id].name);
+ }
+ else
+ netdata_log_error("Request to decrease counter of fd %d, which is outside the array size (1 to %"PRIu32")",
+ id, all_files_size);
+}
+
+static inline void all_files_grow() {
+ void *old = all_files;
+
+ uint32_t new_size = (all_files_size > 0) ? all_files_size * 2 : 2048;
+
+ // there is no empty slot
+ all_files = reallocz(all_files, new_size * sizeof(struct file_descriptor));
+
+ // if the address changed, we have to rebuild the index
+ // since all pointers are now invalid
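+ // (each struct file_descriptor embeds its avl_t node, so the index holds
+ // addresses inside this array; e.g. once reallocz() moves the array, every
+ // node would point into the old, freed block unless we re-insert them all)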
+
+ if(unlikely(old && old != (void *)all_files)) {
+ all_files_index.root = NULL;
+ for(uint32_t i = 0; i < all_files_size; i++) {
+ if(!all_files[i].count) continue;
+ if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i]))
+ netdata_log_error("INTERNAL ERROR: duplicate indexing of fd during realloc.");
+ }
+ }
+
+ // initialize the newly added entries
+
+ for(uint32_t i = all_files_size; i < new_size; i++) {
+ all_files[i].count = 0;
+ all_files[i].name = NULL;
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[i].magic = 0x00000000;
+#endif /* NETDATA_INTERNAL_CHECKS */
+ all_files[i].pos = i;
+ }
+
+ if(unlikely(!all_files_size)) all_files_len = 1;
+ all_files_size = new_size;
+}
+
+static inline uint32_t file_descriptor_set_on_empty_slot(const char *name, uint32_t hash, FD_FILETYPE type) {
+ // check we have enough memory to add it
+ if(!all_files || all_files_len == all_files_size)
+ all_files_grow();
+
+ debug_log(" >> searching for empty slot.");
+
+ // search for an empty slot
+
+ static int last_pos = 0;
+ uint32_t i, c;
+ for(i = 0, c = last_pos ; i < all_files_size ; i++, c++) {
+ if(c >= all_files_size) c = 0;
+ if(c == 0) continue;
+
+ if(!all_files[c].count) {
+ debug_log(" >> Examining slot %d.", c);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash))
+ netdata_log_error("fd on position %"PRIu32" is not cleared properly. It still has %s in it.", c, all_files[c].name);
+#endif /* NETDATA_INTERNAL_CHECKS */
+
+ debug_log(" >> %s fd position %d for %s (last name: %s)", all_files[c].name?"re-using":"using", c, name, all_files[c].name);
+
+ freez((void *)all_files[c].name);
+ all_files[c].name = NULL;
+ last_pos = c;
+ break;
+ }
+ }
+
+ all_files_len++;
+
+ if(i == all_files_size) {
+ fatal("We should find an empty slot, but there isn't any");
+ exit(1);
+ }
+ // else we have an empty slot in 'c'
+
+ debug_log(" >> updating slot %d.", c);
+
+ all_files[c].name = strdupz(name);
+ all_files[c].hash = hash;
+ all_files[c].type = type;
+ all_files[c].pos = c;
+ all_files[c].count = 1;
+#ifdef NETDATA_INTERNAL_CHECKS
+ all_files[c].magic = 0x0BADCAFE;
+#endif /* NETDATA_INTERNAL_CHECKS */
+ if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c]))
+ netdata_log_error("INTERNAL ERROR: duplicate indexing of fd.");
+
+ return c;
+}
+
+uint32_t file_descriptor_find_or_add(const char *name, uint32_t hash) {
+ if(unlikely(!hash))
+ hash = simple_hash(name);
+
+ debug_log("adding or finding name '%s' with hash %u", name, hash);
+
+ struct file_descriptor *fd = file_descriptor_find(name, hash);
+ if(fd) {
+ // found
+ debug_log(" >> found on slot %d", fd->pos);
+
+ fd->count++;
+ return fd->pos;
+ }
+ // not found
+
+ FD_FILETYPE type;
+ if(likely(name[0] == '/')) type = FILETYPE_FILE;
+ else if(likely(strncmp(name, "pipe:", 5) == 0)) type = FILETYPE_PIPE;
+ else if(likely(strncmp(name, "socket:", 7) == 0)) type = FILETYPE_SOCKET;
+ else if(likely(strncmp(name, "anon_inode:", 11) == 0)) {
+ const char *t = &name[11];
+
+ if(strcmp(t, "inotify") == 0) type = FILETYPE_INOTIFY;
+ else if(strcmp(t, "[eventfd]") == 0) type = FILETYPE_EVENTFD;
+ else if(strcmp(t, "[eventpoll]") == 0) type = FILETYPE_EVENTPOLL;
+ else if(strcmp(t, "[timerfd]") == 0) type = FILETYPE_TIMERFD;
+ else if(strcmp(t, "[signalfd]") == 0) type = FILETYPE_SIGNALFD;
+ else {
+ debug_log("UNKNOWN anonymous inode: %s", name);
+ type = FILETYPE_OTHER;
+ }
+ }
+ else if(likely(strcmp(name, "inotify") == 0)) type = FILETYPE_INOTIFY;
+ else {
+ debug_log("UNKNOWN linkname: %s", name);
+ type = FILETYPE_OTHER;
+ }
+
+ return file_descriptor_set_on_empty_slot(name, hash, type);
+}
+
+void clear_pid_fd(struct pid_fd *pfd) {
+ pfd->fd = 0;
+
+#if defined(OS_LINUX)
+ pfd->link_hash = 0;
+ pfd->inode = 0;
+ pfd->cache_iterations_counter = 0;
+ pfd->cache_iterations_reset = 0;
+#endif
+}
+
+void make_all_pid_fds_negative(struct pid_stat *p) {
+ struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
+ while(pfd < pfdend) {
+ pfd->fd = -(pfd->fd);
+ pfd++;
+ }
+}
+
+static inline void cleanup_negative_pid_fds(struct pid_stat *p) {
+ struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
+
+ while(pfd < pfdend) {
+ int fd = pfd->fd;
+
+ if(unlikely(fd < 0)) {
+ file_descriptor_not_used(-(fd));
+ clear_pid_fd(pfd);
+ }
+
+ pfd++;
+ }
+}
+
+void init_pid_fds(struct pid_stat *p, size_t first, size_t size) {
+ struct pid_fd *pfd = &p->fds[first], *pfdend = &p->fds[first + size];
+
+ while(pfd < pfdend) {
+#if defined(OS_LINUX)
+ pfd->filename = NULL;
+#endif
+ clear_pid_fd(pfd);
+ pfd++;
+ }
+}
+
+int read_pid_file_descriptors(struct pid_stat *p, void *ptr) {
+ bool ret = OS_FUNCTION(apps_os_read_pid_fds)(p, ptr);
+ cleanup_negative_pid_fds(p);
+
+ return ret ? 1 : 0;
+}
+#endif \ No newline at end of file
diff --git a/src/collectors/apps.plugin/apps_pid_match.c b/src/collectors/apps.plugin/apps_pid_match.c
new file mode 100644
index 000000000..121899b09
--- /dev/null
+++ b/src/collectors/apps.plugin/apps_pid_match.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "apps_plugin.h"
+
+bool pid_match_check(struct pid_stat *p, APPS_MATCH *match) {
+ if(!match->starts_with && !match->ends_with) {
+ if(match->pattern) {
+ if(simple_pattern_matches_string(match->pattern, p->comm))
+ return true;
+ }
+ else {
+ if(match->compare == p->comm || match->compare == p->comm_orig)
+ return true;
+ }
+ }
+ else if(match->starts_with && !match->ends_with) {
+ if(match->pattern) {
+ if(simple_pattern_matches_string(match->pattern, p->comm))
+ return true;
+ }
+ else {
+ if(string_starts_with_string(p->comm, match->compare) ||
+ (p->comm != p->comm_orig && string_starts_with_string(p->comm_orig, match->compare)))
+ return true;
+ }
+ }
+ else if(!match->starts_with && match->ends_with) {
+ if(match->pattern) {
+ if(simple_pattern_matches_string(match->pattern, p->comm))
+ return true;
+ }
+ else {
+ if(string_ends_with_string(p->comm, match->compare) ||
+ (p->comm != p->comm_orig && string_ends_with_string(p->comm_orig, match->compare)))
+ return true;
+ }
+ }
+ else if(match->starts_with && match->ends_with && p->cmdline) {
+ if(match->pattern) {
+ if(simple_pattern_matches_string(match->pattern, p->cmdline))
+ return true;
+ }
+ else {
+ if(strstr(string2str(p->cmdline), string2str(match->compare)))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+APPS_MATCH pid_match_create(const char *comm) {
+ APPS_MATCH m = {
+ .starts_with = false,
+ .ends_with = false,
+ .compare = NULL,
+ .pattern = NULL,
+ };
+
+ // copy comm to make changes to it
+ size_t len = strlen(comm);
+ char buf[len + 1];
+ memcpy(buf, comm, sizeof(buf));
+
+ trim_all(buf);
+ len = strlen(buf); // trim_all() may have shortened the copy
+
+ if(len && buf[len - 1] == '*') {
+ buf[--len] = '\0';
+ m.starts_with = true;
+ }
+
+ const char *nid = buf;
+ if (nid[0] == '*') {
+ m.ends_with = true;
+ nid++;
+ }
+
+ m.compare = string_strdupz(nid);
+
+ if(strchr(nid, '*'))
+ m.pattern = simple_pattern_create(comm, SIMPLE_PATTERN_NO_SEPARATORS, SIMPLE_PATTERN_EXACT, true);
+
+ return m;
+}
+
+void pid_match_cleanup(APPS_MATCH *m) {
+ string_freez(m->compare);
+ simple_pattern_free(m->pattern);
+}
+
diff --git a/src/collectors/apps.plugin/apps_plugin.c b/src/collectors/apps.plugin/apps_plugin.c
index 8fe1ff008..b8ea0e797 100644
--- a/src/collectors/apps.plugin/apps_plugin.c
+++ b/src/collectors/apps.plugin/apps_plugin.c
@@ -27,18 +27,21 @@
// options
bool debug_enabled = false;
-bool enable_guest_charts = false;
+
bool enable_detailed_uptime_charts = false;
bool enable_users_charts = true;
bool enable_groups_charts = true;
bool include_exited_childs = true;
-bool proc_pid_cmdline_is_needed = false; // true when we need to read /proc/cmdline
-
-#if defined(__FreeBSD__) || defined(__APPLE__)
-bool enable_file_charts = false;
-#else
-bool enable_file_charts = true;
+bool proc_pid_cmdline_is_needed = true; // true when we need to read /proc/<pid>/cmdline
+
+#if defined(OS_FREEBSD) || defined(OS_MACOS)
+int enable_file_charts = CONFIG_BOOLEAN_NO;
+#elif defined(OS_LINUX)
+int enable_file_charts = CONFIG_BOOLEAN_AUTO;
+#elif defined(OS_WINDOWS)
+int enable_file_charts = CONFIG_BOOLEAN_YES;
#endif
+bool obsolete_file_charts = false;
// ----------------------------------------------------------------------------
// internal counters
@@ -53,19 +56,16 @@ size_t
targets_assignment_counter = 0,
apps_groups_targets_count = 0; // # of apps_groups.conf targets
-int
- all_files_len = 0,
- all_files_size = 0,
- show_guest_time = 0, // 1 when guest values are collected
- show_guest_time_old = 0;
-
-#if defined(__FreeBSD__) || defined(__APPLE__)
-usec_t system_current_time_ut;
-#else
-kernel_uint_t system_uptime_secs;
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+bool enable_guest_charts = false;
+bool show_guest_time = false; // set when guest values are collected
#endif
-// ----------------------------------------------------------------------------
+uint32_t
+ all_files_len = 0,
+ all_files_size = 0;
+
+// --------------------------------------------------------------------------------------------------------------------
// Normalization
//
// With normalization we lower the collected metrics by a factor to make them
@@ -74,16 +74,18 @@ kernel_uint_t system_uptime_secs;
// the metrics. This results in utilization that exceeds the total utilization
// of the system.
//
-// During normalization, we align the per-process utilization, to the total of
-// the system. We first consume the exited children utilization and it the
-// collected values is above the total, we proportionally scale each reported
-// metric.
+// During normalization, we align the per-process utilization to the global
+// utilization of the system. We first consume the exited children's utilization
+// and, if the collected values are above the total, we proportionally scale each
+// reported metric.
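+//
+// e.g. if the targets sum up to 1.1 times the global utime+stime reported by
+// /proc/stat, normalize_utilization() multiplies every dimension by roughly
+// 1/1.1 (about 0.91), so the per-process charts do not exceed the system totals.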
// the total system time, as reported by /proc/stat
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
kernel_uint_t
global_utime = 0,
global_stime = 0,
global_gtime = 0;
+#endif
// the normalization ratios, as calculated by normalize_utilization()
NETDATA_DOUBLE
@@ -98,21 +100,11 @@ NETDATA_DOUBLE
cminflt_fix_ratio = 1.0,
cmajflt_fix_ratio = 1.0;
-// ----------------------------------------------------------------------------
-// factor for calculating correct CPU time values depending on units of raw data
-unsigned int time_factor = 0;
-
-// ----------------------------------------------------------------------------
-// command line options
+// --------------------------------------------------------------------------------------------------------------------
int update_every = 1;
-#if defined(__APPLE__)
-mach_timebase_info_data_t mach_info;
-#endif
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-int max_fds_cache_seconds = 60;
+#if defined(OS_LINUX)
proc_state proc_state_count[PROC_STATUS_END];
const char *proc_states[] = {
[PROC_STATUS_RUNNING] = "running",
@@ -127,420 +119,104 @@ const char *proc_states[] = {
static char *user_config_dir = CONFIG_DIR;
static char *stock_config_dir = LIBCONFIG_DIR;
-struct target
- *apps_groups_default_target = NULL, // the default target
- *apps_groups_root_target = NULL, // apps_groups.conf defined
- *users_root_target = NULL, // users
- *groups_root_target = NULL; // user groups
-
size_t pagesize;
-// ----------------------------------------------------------------------------
-
-int managed_log(struct pid_stat *p, PID_LOG log, int status) {
- if(unlikely(!status)) {
- // netdata_log_error("command failed log %u, errno %d", log, errno);
-
- if(unlikely(debug_enabled || errno != ENOENT)) {
- if(unlikely(debug_enabled || !(p->log_thrown & log))) {
- p->log_thrown |= log;
- switch(log) {
- case PID_LOG_IO:
- #if defined(__FreeBSD__) || defined(__APPLE__)
- netdata_log_error("Cannot fetch process %d I/O info (command '%s')", p->pid, p->comm);
- #else
- netdata_log_error("Cannot process %s/proc/%d/io (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
- #endif
- break;
-
- case PID_LOG_STATUS:
- #if defined(__FreeBSD__) || defined(__APPLE__)
- netdata_log_error("Cannot fetch process %d status info (command '%s')", p->pid, p->comm);
- #else
- netdata_log_error("Cannot process %s/proc/%d/status (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
- #endif
- break;
-
- case PID_LOG_CMDLINE:
- #if defined(__FreeBSD__) || defined(__APPLE__)
- netdata_log_error("Cannot fetch process %d command line (command '%s')", p->pid, p->comm);
- #else
- netdata_log_error("Cannot process %s/proc/%d/cmdline (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
- #endif
- break;
-
- case PID_LOG_FDS:
- #if defined(__FreeBSD__) || defined(__APPLE__)
- netdata_log_error("Cannot fetch process %d files (command '%s')", p->pid, p->comm);
- #else
- netdata_log_error("Cannot process entries in %s/proc/%d/fd (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
- #endif
- break;
-
- case PID_LOG_LIMITS:
- #if defined(__FreeBSD__) || defined(__APPLE__)
- ;
- #else
- netdata_log_error("Cannot process %s/proc/%d/limits (command '%s')", netdata_configured_host_prefix, p->pid, p->comm);
- #endif
-
- case PID_LOG_STAT:
- break;
-
- default:
- netdata_log_error("unhandled error for pid %d, command '%s'", p->pid, p->comm);
- break;
- }
- }
- }
- errno_clear();
- }
- else if(unlikely(p->log_thrown & log)) {
- // netdata_log_error("unsetting log %u on pid %d", log, p->pid);
- p->log_thrown &= ~log;
- }
-
- return status;
+void sanitize_apps_plugin_chart_meta(char *buf) {
+ external_plugins_sanitize(buf, buf, strlen(buf) + 1);
}
// ----------------------------------------------------------------------------
-// update statistics on the targets
-
-// 1. link all childs to their parents
-// 2. go from bottom to top, marking as merged all children to their parents,
-// this step links all parents without a target to the child target, if any
-// 3. link all top level processes (the ones not merged) to default target
-// 4. go from top to bottom, linking all children without a target to their parent target
-// after this step all processes have a target.
-// [5. for each killed pid (updated = 0), remove its usage from its target]
-// 6. zero all apps_groups_targets
-// 7. concentrate all values on the apps_groups_targets
-// 8. remove all killed processes
-// 9. find the unique file count for each target
-// check: update_apps_groups_statistics()
-
-static void apply_apps_groups_targets_inheritance(void) {
- struct pid_stat *p = NULL;
-
- // children that do not have a target
- // inherit their target from their parent
- int found = 1, loops = 0;
- while(found) {
- if(unlikely(debug_enabled)) loops++;
- found = 0;
- for(p = root_of_pids; p ; p = p->next) {
- // if this process does not have a target,
- // and it has a parent
- // and its parent has a target
- // then, set the parent's target to this process
- if(unlikely(!p->target && p->parent && p->parent->target)) {
- p->target = p->parent->target;
- found++;
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s).", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
- }
- }
- }
-
- // find all the procs with 0 childs and merge them to their parents
- // repeat, until nothing more can be done.
- int sortlist = 1;
- found = 1;
- while(found) {
- if(unlikely(debug_enabled)) loops++;
- found = 0;
-
- for(p = root_of_pids; p ; p = p->next) {
- if(unlikely(!p->sortlist && !p->children_count))
- p->sortlist = sortlist++;
-
- if(unlikely(
- !p->children_count // if this process does not have any children
- && !p->merged // and is not already merged
- && p->parent // and has a parent
- && p->parent->children_count // and its parent has children
- // and the target of this process and its parent is the same,
- // or the parent does not have a target
- && (p->target == p->parent->target || !p->parent->target)
- && p->ppid != INIT_PID // and its parent is not init
- )) {
- // mark it as merged
- p->parent->children_count--;
- p->merged = true;
-
- // the parent inherits the child's target, if it does not have a target itself
- if(unlikely(p->target && !p->parent->target)) {
- p->parent->target = p->target;
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its child %d (%s).", p->target->name, p->parent->pid, p->parent->comm, p->pid, p->comm);
- }
-
- found++;
- }
- }
-
- debug_log("TARGET INHERITANCE: merged %d processes", found);
- }
-
- // init goes always to default target
- struct pid_stat *pi = find_pid_entry(INIT_PID);
- if(pi && !pi->matched_by_config)
- pi->target = apps_groups_default_target;
-
- // pid 0 goes always to default target
- pi = find_pid_entry(0);
- if(pi && !pi->matched_by_config)
- pi->target = apps_groups_default_target;
-
- // give a default target on all top level processes
- if(unlikely(debug_enabled)) loops++;
- for(p = root_of_pids; p ; p = p->next) {
- // if the process is not merged itself
- // then it is a top level process
- if(unlikely(!p->merged && !p->target))
- p->target = apps_groups_default_target;
-
- // make sure all processes have a sortlist
- if(unlikely(!p->sortlist))
- p->sortlist = sortlist++;
- }
-
- pi = find_pid_entry(1);
- if(pi)
- pi->sortlist = sortlist++;
-
- // give a target to all merged child processes
- found = 1;
- while(found) {
- if(unlikely(debug_enabled)) loops++;
- found = 0;
- for(p = root_of_pids; p ; p = p->next) {
- if(unlikely(!p->target && p->merged && p->parent && p->parent->target)) {
- p->target = p->parent->target;
- found++;
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int("TARGET INHERITANCE: %s is inherited by %d (%s) from its parent %d (%s) at phase 2.", p->target->name, p->pid, p->comm, p->parent->pid, p->parent->comm);
- }
- }
- }
-
- debug_log("apply_apps_groups_targets_inheritance() made %d loops on the process tree", loops);
-}
-
-static size_t zero_all_targets(struct target *root) {
- struct target *w;
- size_t count = 0;
-
- for (w = root; w ; w = w->next) {
- count++;
-
- w->minflt = 0;
- w->majflt = 0;
- w->utime = 0;
- w->stime = 0;
- w->gtime = 0;
- w->cminflt = 0;
- w->cmajflt = 0;
- w->cutime = 0;
- w->cstime = 0;
- w->cgtime = 0;
- w->num_threads = 0;
- // w->rss = 0;
- w->processes = 0;
-
- w->status_vmsize = 0;
- w->status_vmrss = 0;
- w->status_vmshared = 0;
- w->status_rssfile = 0;
- w->status_rssshmem = 0;
- w->status_vmswap = 0;
- w->status_voluntary_ctxt_switches = 0;
- w->status_nonvoluntary_ctxt_switches = 0;
-
- w->io_logical_bytes_read = 0;
- w->io_logical_bytes_written = 0;
- w->io_read_calls = 0;
- w->io_write_calls = 0;
- w->io_storage_bytes_read = 0;
- w->io_storage_bytes_written = 0;
- w->io_cancelled_write_bytes = 0;
-
- // zero file counters
- if(w->target_fds) {
- memset(w->target_fds, 0, sizeof(int) * w->target_fds_size);
- w->openfds.files = 0;
- w->openfds.pipes = 0;
- w->openfds.sockets = 0;
- w->openfds.inotifies = 0;
- w->openfds.eventfds = 0;
- w->openfds.timerfds = 0;
- w->openfds.signalfds = 0;
- w->openfds.eventpolls = 0;
- w->openfds.other = 0;
-
- w->max_open_files_percent = 0.0;
- }
-
- w->uptime_min = 0;
- w->uptime_sum = 0;
- w->uptime_max = 0;
-
- if(unlikely(w->root_pid)) {
- struct pid_on_target *pid_on_target = w->root_pid;
+// update chart dimensions
- while(pid_on_target) {
- struct pid_on_target *pid_on_target_to_free = pid_on_target;
- pid_on_target = pid_on_target->next;
- freez(pid_on_target_to_free);
- }
+// Helper function to count the number of running (updated) processes in the linked list
+int count_processes(struct pid_stat *root) {
+ int count = 0;
- w->root_pid = NULL;
- }
- }
+ for(struct pid_stat *p = root; p ; p = p->next)
+ if(p->updated) count++;
return count;
}
-static inline void aggregate_pid_on_target(struct target *w, struct pid_stat *p, struct target *o) {
- (void)o;
-
- if(unlikely(!p->updated)) {
- // the process is not running
- return;
- }
-
- if(unlikely(!w)) {
- netdata_log_error("pid %d %s was left without a target!", p->pid, p->comm);
- return;
- }
-
- if(p->openfds_limits_percent > w->max_open_files_percent)
- w->max_open_files_percent = p->openfds_limits_percent;
-
- w->cutime += p->cutime;
- w->cstime += p->cstime;
- w->cgtime += p->cgtime;
- w->cminflt += p->cminflt;
- w->cmajflt += p->cmajflt;
-
- w->utime += p->utime;
- w->stime += p->stime;
- w->gtime += p->gtime;
- w->minflt += p->minflt;
- w->majflt += p->majflt;
-
- // w->rss += p->rss;
-
- w->status_vmsize += p->status_vmsize;
- w->status_vmrss += p->status_vmrss;
- w->status_vmshared += p->status_vmshared;
- w->status_rssfile += p->status_rssfile;
- w->status_rssshmem += p->status_rssshmem;
- w->status_vmswap += p->status_vmswap;
- w->status_voluntary_ctxt_switches += p->status_voluntary_ctxt_switches;
- w->status_nonvoluntary_ctxt_switches += p->status_nonvoluntary_ctxt_switches;
-
- w->io_logical_bytes_read += p->io_logical_bytes_read;
- w->io_logical_bytes_written += p->io_logical_bytes_written;
- w->io_read_calls += p->io_read_calls;
- w->io_write_calls += p->io_write_calls;
- w->io_storage_bytes_read += p->io_storage_bytes_read;
- w->io_storage_bytes_written += p->io_storage_bytes_written;
- w->io_cancelled_write_bytes += p->io_cancelled_write_bytes;
-
- w->processes++;
- w->num_threads += p->num_threads;
-
- if(!w->uptime_min || p->uptime < w->uptime_min) w->uptime_min = p->uptime;
- if(!w->uptime_max || w->uptime_max < p->uptime) w->uptime_max = p->uptime;
- w->uptime_sum += p->uptime;
-
- if(unlikely(debug_enabled || w->debug_enabled)) {
- debug_log_int("aggregating '%s' pid %d on target '%s' utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->comm, p->pid, w->name, p->utime, p->stime, p->gtime, p->cutime, p->cstime, p->cgtime, p->minflt, p->majflt, p->cminflt, p->cmajflt);
-
- struct pid_on_target *pid_on_target = mallocz(sizeof(struct pid_on_target));
- pid_on_target->pid = p->pid;
- pid_on_target->next = w->root_pid;
- w->root_pid = pid_on_target;
- }
+// Comparator function to sort by pid
+int compare_by_pid(const void *a, const void *b) {
+ struct pid_stat *pa = *(struct pid_stat **)a;
+ struct pid_stat *pb = *(struct pid_stat **)b;
+ return ((int)pa->pid - (int)pb->pid);
}
-static void calculate_netdata_statistics(void) {
- apply_apps_groups_targets_inheritance();
-
- zero_all_targets(users_root_target);
- zero_all_targets(groups_root_target);
- apps_groups_targets_count = zero_all_targets(apps_groups_root_target);
-
- // this has to be done, before the cleanup
- struct pid_stat *p = NULL;
- struct target *w = NULL, *o = NULL;
-
- // concentrate everything on the targets
- for(p = root_of_pids; p ; p = p->next) {
-
- // --------------------------------------------------------------------
- // apps_groups target
-
- aggregate_pid_on_target(p->target, p, NULL);
+// Function to print a process and its children recursively
+void print_process_tree(struct pid_stat *root, struct pid_stat *parent, int depth, int total_processes) {
+ // Allocate an array of pointers for processes with the given parent
+ struct pid_stat **children = (struct pid_stat **)malloc(total_processes * sizeof(struct pid_stat *));
+ int children_count = 0;
-
- // --------------------------------------------------------------------
- // user target
-
- o = p->user_target;
- if(likely(p->user_target && p->user_target->uid == p->uid))
- w = p->user_target;
- else {
- if(unlikely(debug_enabled && p->user_target))
- debug_log("pid %d (%s) switched user from %u (%s) to %u.", p->pid, p->comm, p->user_target->uid, p->user_target->name, p->uid);
-
- w = p->user_target = get_users_target(p->uid);
+ // Populate the array with processes that have the given parent
+ struct pid_stat *p = root;
+ while (p != NULL) {
+ if (p->updated && p->parent == parent) {
+ children[children_count++] = p;
}
+ p = p->next;
+ }
- aggregate_pid_on_target(w, p, o);
-
-
- // --------------------------------------------------------------------
- // user group target
-
- o = p->group_target;
- if(likely(p->group_target && p->group_target->gid == p->gid))
- w = p->group_target;
- else {
- if(unlikely(debug_enabled && p->group_target))
- debug_log("pid %d (%s) switched group from %u (%s) to %u.", p->pid, p->comm, p->group_target->gid, p->group_target->name, p->gid);
+ // Sort the children array by pid
+ qsort(children, children_count, sizeof(struct pid_stat *), compare_by_pid);
- w = p->group_target = get_groups_target(p->gid);
+ // Print each child and recurse
+ for (int i = 0; i < children_count; i++) {
+ // Print the current process with indentation based on depth
+ if (depth > 0) {
+ for (int j = 0; j < (depth - 1) * 4; j++) {
+ printf(" ");
+ }
+ printf(" \\_ ");
}
- aggregate_pid_on_target(w, p, o);
+#if (PROCESSES_HAVE_COMM_AND_NAME == 1)
+ printf("[%d] %s (name: %s) [%s]: %s\n", children[i]->pid,
+ string2str(children[i]->comm),
+ string2str(children[i]->name),
+ string2str(children[i]->target->name),
+ string2str(children[i]->cmdline));
+#else
+ printf("[%d] orig: '%s' new: '%s' [target: %s]: cmdline: %s\n", children[i]->pid,
+ string2str(children[i]->comm_orig),
+ string2str(children[i]->comm),
+ string2str(children[i]->target->name),
+ string2str(children[i]->cmdline));
+#endif
+ // Recurse to print this child's children
+ print_process_tree(root, children[i], depth + 1, total_processes);
+ }
- // --------------------------------------------------------------------
- // aggregate all file descriptors
+ // Free the allocated array
+ free(children);
+}
- if(enable_file_charts)
- aggregate_pid_fds_on_targets(p);
- }
+// Function to print the full hierarchy
+void print_hierarchy(struct pid_stat *root) {
+ // Count the total number of processes
+ int total_processes = count_processes(root);
- cleanup_exited_pids();
+ // Start printing from processes with parent = NULL (i.e., root processes)
+ print_process_tree(root, NULL, 0, total_processes);
}
// ----------------------------------------------------------------------------
// update chart dimensions
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
static void normalize_utilization(struct target *root) {
struct target *w;
- // childs processing introduces spikes
- // here we try to eliminate them by disabling childs processing either for specific dimensions
- // or entirely. Of course, either way, we disable it just a single iteration.
+ // children processing introduces spikes;
+ // here we try to eliminate them by disabling children processing either
+ // for specific dimensions or entirely.
+ // of course, either way, we disable it just for a single iteration.
- kernel_uint_t max_time = os_get_system_cpus() * time_factor * RATES_DETAIL;
+ kernel_uint_t max_time = os_get_system_cpus() * NSEC_PER_SEC;
kernel_uint_t utime = 0, cutime = 0, stime = 0, cstime = 0, gtime = 0, cgtime = 0, minflt = 0, cminflt = 0, majflt = 0, cmajflt = 0;
if(global_utime > max_time) global_utime = max_time;
@@ -548,19 +224,19 @@ static void normalize_utilization(struct target *root) {
if(global_gtime > max_time) global_gtime = max_time;
for(w = root; w ; w = w->next) {
- if(w->target || (!w->processes && !w->exposed)) continue;
-
- utime += w->utime;
- stime += w->stime;
- gtime += w->gtime;
- cutime += w->cutime;
- cstime += w->cstime;
- cgtime += w->cgtime;
-
- minflt += w->minflt;
- majflt += w->majflt;
- cminflt += w->cminflt;
- cmajflt += w->cmajflt;
+ if(w->target || (!w->values[PDF_PROCESSES] && !w->exposed)) continue;
+
+ utime += w->values[PDF_UTIME];
+ stime += w->values[PDF_STIME];
+ gtime += w->values[PDF_GTIME];
+ cutime += w->values[PDF_CUTIME];
+ cstime += w->values[PDF_CSTIME];
+ cgtime += w->values[PDF_CGTIME];
+
+ minflt += w->values[PDF_MINFLT];
+ majflt += w->values[PDF_MAJFLT];
+ cminflt += w->values[PDF_CMINFLT];
+ cmajflt += w->values[PDF_CMAJFLT];
}
if(global_utime || global_stime || global_gtime) {
@@ -574,7 +250,7 @@ static void normalize_utilization(struct target *root) {
cgtime_fix_ratio = 1.0; //(NETDATA_DOUBLE)(global_utime + global_stime) / (NETDATA_DOUBLE)(utime + cutime + stime + cstime);
}
else if((global_utime + global_stime > utime + stime) && (cutime || cstime)) {
- // children resources are too high
+ // children resources are too high,
// lower only the children resources
utime_fix_ratio =
stime_fix_ratio =
@@ -683,6 +359,7 @@ static void normalize_utilization(struct target *root) {
, (kernel_uint_t)(cgtime * cgtime_fix_ratio)
);
}
+#endif
// ----------------------------------------------------------------------------
// parse command line arguments
@@ -690,6 +367,7 @@ static void normalize_utilization(struct target *root) {
int check_proc_1_io() {
int ret = 0;
+#if defined(OS_LINUX)
procfile *ff = procfile_open("/proc/1/io", NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
if(!ff) goto cleanup;
@@ -700,9 +378,14 @@ int check_proc_1_io() {
cleanup:
procfile_close(ff);
+#endif
+
return ret;
}
+static bool profile_speed = false;
+static bool print_tree_and_exit = false;
+
static void parse_args(int argc, char **argv)
{
int i, freq = 0;
@@ -721,6 +404,12 @@ static void parse_args(int argc, char **argv)
exit(0);
}
+ if(strcmp("print", argv[i]) == 0 || strcmp("-print", argv[i]) == 0 || strcmp("--print", argv[i]) == 0) {
+ print_tree_and_exit = true;
+ continue;
+ }
+
+#if defined(OS_LINUX)
if(strcmp("test-permissions", argv[i]) == 0 || strcmp("-t", argv[i]) == 0) {
if(!check_proc_1_io()) {
perror("Tried to read /proc/1/io and it failed");
@@ -729,6 +418,7 @@ static void parse_args(int argc, char **argv)
printf("OK\n");
exit(0);
}
+#endif
if(strcmp("debug", argv[i]) == 0) {
debug_enabled = true;
@@ -738,7 +428,12 @@ static void parse_args(int argc, char **argv)
continue;
}
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
+ if(strcmp("profile-speed", argv[i]) == 0) {
+ profile_speed = true;
+ continue;
+ }
+
+#if defined(OS_LINUX)
if(strcmp("fds-cache-secs", argv[i]) == 0) {
if(argc <= i + 1) {
fprintf(stderr, "Parameter 'fds-cache-secs' requires a number as argument.\n");
@@ -751,6 +446,7 @@ static void parse_args(int argc, char **argv)
}
#endif
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) || (PROCESSES_HAVE_CHILDREN_FLTS == 1)
if(strcmp("no-childs", argv[i]) == 0 || strcmp("without-childs", argv[i]) == 0) {
include_exited_childs = 0;
continue;
@@ -760,7 +456,9 @@ static void parse_args(int argc, char **argv)
include_exited_childs = 1;
continue;
}
+#endif
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
if(strcmp("with-guest", argv[i]) == 0) {
enable_guest_charts = true;
continue;
@@ -770,26 +468,33 @@ static void parse_args(int argc, char **argv)
enable_guest_charts = false;
continue;
}
+#endif
+#if (PROCESSES_HAVE_FDS == 1)
if(strcmp("with-files", argv[i]) == 0) {
- enable_file_charts = 1;
+ enable_file_charts = CONFIG_BOOLEAN_YES;
continue;
}
if(strcmp("no-files", argv[i]) == 0 || strcmp("without-files", argv[i]) == 0) {
- enable_file_charts = 0;
+ enable_file_charts = CONFIG_BOOLEAN_NO;
continue;
}
+#endif
+#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1)
if(strcmp("no-users", argv[i]) == 0 || strcmp("without-users", argv[i]) == 0) {
enable_users_charts = 0;
continue;
}
+#endif
+#if (PROCESSES_HAVE_GID == 1)
if(strcmp("no-groups", argv[i]) == 0 || strcmp("without-groups", argv[i]) == 0) {
enable_groups_charts = 0;
continue;
}
+#endif
if(strcmp("with-detailed-uptime", argv[i]) == 0) {
enable_detailed_uptime_charts = 1;
@@ -821,26 +526,36 @@ static void parse_args(int argc, char **argv)
" it may include sensitive data such as passwords and tokens\n"
" enabling this could be a security risk\n"
"\n"
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1) || (PROCESSES_HAVE_CHILDREN_FLTS == 1)
" with-childs\n"
" without-childs enable / disable aggregating exited\n"
" children resources into parents\n"
" (default is enabled)\n"
"\n"
+#endif
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
" with-guest\n"
" without-guest enable / disable reporting guest charts\n"
" (default is disabled)\n"
"\n"
+#endif
+#if (PROCESSES_HAVE_FDS == 1)
" with-files\n"
" without-files enable / disable reporting files, sockets, pipes\n"
" (default is enabled)\n"
"\n"
+#endif
+#if (PROCESSES_HAVE_UID == 1) || (PROCESSES_HAVE_SID == 1)
" without-users disable reporting per user charts\n"
"\n"
+#endif
+#if (PROCESSES_HAVE_GID == 1)
" without-groups disable reporting per user group charts\n"
"\n"
+#endif
" with-detailed-uptime enable reporting min/avg/max uptime charts\n"
"\n"
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
+#if defined(OS_LINUX)
" fds-cache-secs N cache the files of processed for N seconds\n"
" caching is adaptive per file (when a file\n"
" is found, it starts at 0 and while the file\n"
@@ -852,15 +567,17 @@ static void parse_args(int argc, char **argv)
" version or -v or -V print program version and exit\n"
"\n"
, NETDATA_VERSION
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
+#if defined(OS_LINUX)
, max_fds_cache_seconds
#endif
);
- exit(1);
+ exit(0);
}
+#if !defined(OS_WINDOWS) || !defined(RUN_UNDER_CLION)
netdata_log_error("Cannot understand option %s", argv[i]);
exit(1);
+#endif
}
if(freq > 0) update_every = freq;
@@ -879,7 +596,8 @@ static void parse_args(int argc, char **argv)
netdata_log_info("Loaded config file '%s/apps_groups.conf'", user_config_dir);
}
-static int am_i_running_as_root() {
+#if !defined(OS_WINDOWS)
+static inline int am_i_running_as_root() {
uid_t uid = getuid(), euid = geteuid();
if(uid == 0 || euid == 0) {
@@ -892,7 +610,7 @@ static int am_i_running_as_root() {
}
#ifdef HAVE_SYS_CAPABILITY_H
-static int check_capabilities() {
+static inline int check_capabilities() {
cap_t caps = cap_get_proc();
if(!caps) {
netdata_log_error("Cannot get current capabilities.");
@@ -936,27 +654,17 @@ static int check_capabilities() {
return ret;
}
#else
-static int check_capabilities() {
+static inline int check_capabilities() {
return 0;
}
#endif
+#endif
-static netdata_mutex_t apps_and_stdout_mutex = NETDATA_MUTEX_INITIALIZER;
-
-struct target *find_target_by_name(struct target *base, const char *name) {
- struct target *t;
- for(t = base; t ; t = t->next) {
- if (strcmp(t->name, name) == 0)
- return t;
- }
-
- return NULL;
-}
+netdata_mutex_t apps_and_stdout_mutex = NETDATA_MUTEX_INITIALIZER;
static bool apps_plugin_exit = false;
int main(int argc, char **argv) {
- clocks_init();
nd_log_initialize_for_external_plugins("apps.plugin");
pagesize = (size_t)sysconf(_SC_PAGESIZE);
@@ -999,48 +707,46 @@ int main(int argc, char **argv) {
}
#endif /* NETDATA_INTERNAL_CHECKS */
- procfile_adaptive_initial_allocation = 1;
-
- os_get_system_HZ();
-#if defined(__FreeBSD__)
- time_factor = 1000000ULL / RATES_DETAIL; // FreeBSD uses usecs
-#endif
-#if defined(__APPLE__)
- mach_timebase_info(&mach_info);
- time_factor = 1000000ULL / RATES_DETAIL;
-#endif
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- time_factor = system_hz; // Linux uses clock ticks
-#endif
-
- os_get_system_pid_max();
+ procfile_set_adaptive_allocation(true, 0, 0, 0);
os_get_system_cpus_uncached();
-
+ apps_managers_and_aggregators_init(); // before parsing args!
parse_args(argc, argv);
+#if !defined(OS_WINDOWS)
if(!check_capabilities() && !am_i_running_as_root() && !check_proc_1_io()) {
uid_t uid = getuid(), euid = geteuid();
#ifdef HAVE_SYS_CAPABILITY_H
netdata_log_error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
- "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
- "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; "
- "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
- , uid, euid, argv[0], argv[0], argv[0]
- );
+ "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
+ "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; "
+ "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
+ , uid, euid, argv[0], argv[0], argv[0]);
#else
netdata_log_error("apps.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
- "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
- "Your system does not support capabilities. "
- "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
- , uid, euid, argv[0], argv[0]
- );
+ "Without these, apps.plugin cannot report disk I/O utilization of other processes. "
+ "Your system does not support capabilities. "
+ "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; "
+ , uid, euid, argv[0], argv[0]);
#endif
}
+#endif
netdata_log_info("started on pid %d", getpid());
- users_and_groups_init();
- pids_init();
+#if (PROCESSES_HAVE_UID == 1)
+ cached_usernames_init();
+#endif
+
+#if (PROCESSES_HAVE_GID == 1)
+ cached_groupnames_init();
+#endif
+
+#if (PROCESSES_HAVE_SID == 1)
+ cached_sid_username_init();
+#endif
+
+ apps_pids_init();
+ OS_FUNCTION(apps_os_init)();
// ------------------------------------------------------------------------
// the event loop for functions
@@ -1055,22 +761,22 @@ int main(int argc, char **argv) {
netdata_mutex_lock(&apps_and_stdout_mutex);
APPS_PLUGIN_GLOBAL_FUNCTIONS();
- usec_t step = update_every * USEC_PER_SEC;
global_iterations_counter = 1;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
for(; !apps_plugin_exit ; global_iterations_counter++) {
netdata_mutex_unlock(&apps_and_stdout_mutex);
-#ifdef NETDATA_PROFILING
-#warning "compiling for profiling"
- static int profiling_count=0;
- profiling_count++;
- if(unlikely(profiling_count > 2000)) exit(0);
- usec_t dt = update_every * USEC_PER_SEC;
-#else
- usec_t dt = heartbeat_next(&hb, step);
-#endif
+ usec_t dt;
+ if(profile_speed) {
+ static int profiling_count=0;
+ profiling_count++;
+ if(unlikely(profiling_count > 500)) exit(0);
+ dt = update_every * USEC_PER_SEC;
+ }
+ else
+ dt = heartbeat_next(&hb);
+
netdata_mutex_lock(&apps_and_stdout_mutex);
struct pollfd pollfd = { .fd = fileno(stdout), .events = POLLERR };
@@ -1083,9 +789,6 @@ int main(int argc, char **argv) {
fatal("Received error on read pipe.");
}
- if(global_iterations_counter % 10 == 0)
- get_MemTotal();
-
if(!collect_data_for_all_pids()) {
netdata_log_error("Cannot collect /proc data for running processes. Disabling apps.plugin...");
printf("DISABLE\n");
@@ -1093,29 +796,50 @@ int main(int argc, char **argv) {
exit(1);
}
- calculate_netdata_statistics();
+ aggregate_processes_to_targets();
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ OS_FUNCTION(apps_os_read_global_cpu_utilization)();
normalize_utilization(apps_groups_root_target);
+#endif
+
+ if(unlikely(print_tree_and_exit)) {
+ print_hierarchy(root_of_pids());
+ exit(0);
+ }
if(send_resource_usage)
send_resource_usage_to_netdata(dt);
+#if (PROCESSES_HAVE_STATE == 1)
send_proc_states_count(dt);
- send_charts_updates_to_netdata(apps_groups_root_target, "app", "app_group", "Apps");
+#endif
+
+ send_charts_updates_to_netdata(apps_groups_root_target, "app", "app_group", "Applications Groups");
send_collected_data_to_netdata(apps_groups_root_target, "app", dt);
+#if (PROCESSES_HAVE_UID == 1)
if (enable_users_charts) {
- send_charts_updates_to_netdata(users_root_target, "user", "user", "Users");
+ send_charts_updates_to_netdata(users_root_target, "user", "user", "User Processes");
send_collected_data_to_netdata(users_root_target, "user", dt);
}
+#endif
+#if (PROCESSES_HAVE_GID == 1)
if (enable_groups_charts) {
- send_charts_updates_to_netdata(groups_root_target, "usergroup", "user_group", "User Groups");
+ send_charts_updates_to_netdata(groups_root_target, "usergroup", "user_group", "User Group Processes");
send_collected_data_to_netdata(groups_root_target, "usergroup", dt);
}
+#endif
- fflush(stdout);
+#if (PROCESSES_HAVE_SID == 1)
+ if (enable_users_charts) {
+ send_charts_updates_to_netdata(sids_root_target, "user", "user", "User Processes");
+ send_collected_data_to_netdata(sids_root_target, "user", dt);
+ }
+#endif
- show_guest_time_old = show_guest_time;
+ fflush(stdout);
debug_log("done Loop No %zu", global_iterations_counter);
}
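
The main loop above replaces the old compile-time NETDATA_PROFILING block with a runtime profile_speed flag: in profiling mode the loop skips the heartbeat sleep and exits after 500 iterations, otherwise it blocks on heartbeat_next() until the next tick. Below is a minimal, self-contained sketch of that control flow; the heartbeat_t stubs are stand-ins assumed only for this illustration, not the real netdata API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t usec_t;
#define USEC_PER_SEC 1000000ULL

/* stand-ins for the real heartbeat API, assumed here for illustration only */
typedef struct { usec_t step; } heartbeat_t;
static void   heartbeat_init(heartbeat_t *hb, usec_t step) { hb->step = step; }
static usec_t heartbeat_next(heartbeat_t *hb) { /* the real call sleeps until the next tick */ return hb->step; }

static bool profile_speed = true;   // run flat-out and stop, as the profile-speed option does
static int  update_every  = 1;

int main(void) {
    heartbeat_t hb;
    heartbeat_init(&hb, update_every * USEC_PER_SEC);

    for(unsigned long iterations = 1; ; iterations++) {
        usec_t dt;
        if(profile_speed) {
            if(iterations > 500) break;          // profiling mode: fixed iteration count, no sleep
            dt = update_every * USEC_PER_SEC;
        }
        else
            dt = heartbeat_next(&hb);            // normal mode: block until the next tick

        (void)dt;  // data collection and chart updates would run here, using dt as elapsed time
    }

    printf("done\n");
    return 0;
}
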
diff --git a/src/collectors/apps.plugin/apps_plugin.h b/src/collectors/apps.plugin/apps_plugin.h
index a085872d9..1abd07f22 100644
--- a/src/collectors/apps.plugin/apps_plugin.h
+++ b/src/collectors/apps.plugin/apps_plugin.h
@@ -6,11 +6,41 @@
#include "collectors/all.h"
#include "libnetdata/libnetdata.h"
-#ifdef __FreeBSD__
+#define OS_FUNC_CONCAT(a, b) a##b
+
+#if defined(OS_FREEBSD)
#include <sys/user.h>
-#endif
-#ifdef __APPLE__
+#define OS_INIT_PID 1
+#define ALL_PIDS_ARE_READ_INSTANTLY 1
+#define PROCESSES_HAVE_CPU_GUEST_TIME 0
+#define PROCESSES_HAVE_CPU_CHILDREN_TIME 1
+#define PROCESSES_HAVE_VOLCTX 0
+#define PROCESSES_HAVE_NVOLCTX 0
+#define PROCESSES_HAVE_PHYSICAL_IO 0
+#define PROCESSES_HAVE_LOGICAL_IO 1
+#define PROCESSES_HAVE_IO_CALLS 0
+#define PROCESSES_HAVE_UID 1
+#define PROCESSES_HAVE_GID 1
+#define PROCESSES_HAVE_SID 0
+#define PROCESSES_HAVE_MAJFLT 1
+#define PROCESSES_HAVE_CHILDREN_FLTS 1
+#define PROCESSES_HAVE_VMSWAP 0
+#define PROCESSES_HAVE_VMSHARED 0
+#define PROCESSES_HAVE_RSSFILE 0
+#define PROCESSES_HAVE_RSSSHMEM 0
+#define PROCESSES_HAVE_FDS 1
+#define PROCESSES_HAVE_HANDLES 0
+#define PROCESSES_HAVE_CMDLINE 1
+#define PROCESSES_HAVE_PID_LIMITS 0
+#define PROCESSES_HAVE_COMM_AND_NAME 0
+#define PROCESSES_HAVE_STATE 0
+#define PPID_SHOULD_BE_RUNNING 1
+#define INCREMENTAL_DATA_COLLECTION 1
+#define CPU_TO_NANOSECONDCORES (1)
+#define OS_FUNCTION(func) OS_FUNC_CONCAT(func, _freebsd)
+
+#elif defined(OS_MACOS)
#include <mach/mach.h>
#include <mach/mach_host.h>
#include <libproc.h>
@@ -18,47 +48,129 @@
#include <sys/sysctl.h>
#include <mach/mach_time.h> // For mach_timebase_info_data_t and mach_timebase_info
-extern mach_timebase_info_data_t mach_info;
-#endif
-
-// ----------------------------------------------------------------------------
-// per O/S configuration
-
-// the minimum PID of the system
-// this is also the pid of the init process
-#define INIT_PID 1
-
-// if the way apps.plugin will work, will read the entire process list,
-// including the resource utilization of each process, instantly
-// set this to 1
-// when set to 0, apps.plugin builds a sort list of processes, in order
-// to process children processes, before parent processes
-#if defined(__FreeBSD__) || defined(__APPLE__)
-#define ALL_PIDS_ARE_READ_INSTANTLY 1
-#else
-#define ALL_PIDS_ARE_READ_INSTANTLY 0
-#endif
-
-#if defined(__APPLE__)
struct pid_info {
struct kinfo_proc proc;
struct proc_taskinfo taskinfo;
struct proc_bsdinfo bsdinfo;
struct rusage_info_v4 rusageinfo;
};
+
+#define OS_INIT_PID 1
+#define ALL_PIDS_ARE_READ_INSTANTLY 1
+#define PROCESSES_HAVE_CPU_GUEST_TIME 0
+#define PROCESSES_HAVE_CPU_CHILDREN_TIME 0
+#define PROCESSES_HAVE_VOLCTX 1
+#define PROCESSES_HAVE_NVOLCTX 0
+#define PROCESSES_HAVE_PHYSICAL_IO 0
+#define PROCESSES_HAVE_LOGICAL_IO 1
+#define PROCESSES_HAVE_IO_CALLS 0
+#define PROCESSES_HAVE_UID 1
+#define PROCESSES_HAVE_GID 1
+#define PROCESSES_HAVE_SID 0
+#define PROCESSES_HAVE_MAJFLT 1
+#define PROCESSES_HAVE_CHILDREN_FLTS 0
+#define PROCESSES_HAVE_VMSWAP 0
+#define PROCESSES_HAVE_VMSHARED 0
+#define PROCESSES_HAVE_RSSFILE 0
+#define PROCESSES_HAVE_RSSSHMEM 0
+#define PROCESSES_HAVE_FDS 1
+#define PROCESSES_HAVE_HANDLES 0
+#define PROCESSES_HAVE_CMDLINE 1
+#define PROCESSES_HAVE_PID_LIMITS 0
+#define PROCESSES_HAVE_COMM_AND_NAME 0
+#define PROCESSES_HAVE_STATE 0
+#define PPID_SHOULD_BE_RUNNING 1
+#define INCREMENTAL_DATA_COLLECTION 1
+#define CPU_TO_NANOSECONDCORES (1) // already in nanoseconds
+#define OS_FUNCTION(func) OS_FUNC_CONCAT(func, _macos)
+
+#elif defined(OS_WINDOWS)
+#define OS_INIT_PID 0 // dynamic, is set during data collection
+#define ALL_PIDS_ARE_READ_INSTANTLY 1
+#define PROCESSES_HAVE_CPU_GUEST_TIME 0
+#define PROCESSES_HAVE_CPU_CHILDREN_TIME 0
+#define PROCESSES_HAVE_VOLCTX 0
+#define PROCESSES_HAVE_NVOLCTX 0
+#define PROCESSES_HAVE_PHYSICAL_IO 0
+#define PROCESSES_HAVE_LOGICAL_IO 1
+#define PROCESSES_HAVE_IO_CALLS 1
+#define PROCESSES_HAVE_UID 0
+#define PROCESSES_HAVE_GID 0
+#define PROCESSES_HAVE_SID 1
+#define PROCESSES_HAVE_MAJFLT 0
+#define PROCESSES_HAVE_CHILDREN_FLTS 0
+#define PROCESSES_HAVE_VMSWAP 1
+#define PROCESSES_HAVE_VMSHARED 0
+#define PROCESSES_HAVE_RSSFILE 0
+#define PROCESSES_HAVE_RSSSHMEM 0
+#define PROCESSES_HAVE_FDS 0
+#define PROCESSES_HAVE_HANDLES 1
+#define PROCESSES_HAVE_CMDLINE 0
+#define PROCESSES_HAVE_PID_LIMITS 0
+#define PROCESSES_HAVE_COMM_AND_NAME 1
+#define PROCESSES_HAVE_STATE 0
+#define PPID_SHOULD_BE_RUNNING 0
+#define INCREMENTAL_DATA_COLLECTION 0
+#define CPU_TO_NANOSECONDCORES (100) // convert 100ns to ns
+#define OS_FUNCTION(func) OS_FUNC_CONCAT(func, _windows)
+
+#elif defined(OS_LINUX)
+#define OS_INIT_PID 1
+#define ALL_PIDS_ARE_READ_INSTANTLY 0
+#define PROCESSES_HAVE_CPU_GUEST_TIME 1
+#define PROCESSES_HAVE_CPU_CHILDREN_TIME 1
+#define PROCESSES_HAVE_VOLCTX 1
+#define PROCESSES_HAVE_NVOLCTX 1
+#define PROCESSES_HAVE_PHYSICAL_IO 1
+#define PROCESSES_HAVE_LOGICAL_IO 1
+#define PROCESSES_HAVE_IO_CALLS 1
+#define PROCESSES_HAVE_UID 1
+#define PROCESSES_HAVE_GID 1
+#define PROCESSES_HAVE_SID 0
+#define PROCESSES_HAVE_MAJFLT 1
+#define PROCESSES_HAVE_CHILDREN_FLTS 1
+#define PROCESSES_HAVE_VMSWAP 1
+#define PROCESSES_HAVE_VMSHARED 1
+#define PROCESSES_HAVE_RSSFILE 1
+#define PROCESSES_HAVE_RSSSHMEM 1
+#define PROCESSES_HAVE_FDS 1
+#define PROCESSES_HAVE_HANDLES 0
+#define PROCESSES_HAVE_CMDLINE 1
+#define PROCESSES_HAVE_PID_LIMITS 1
+#define PROCESSES_HAVE_COMM_AND_NAME 0
+#define PROCESSES_HAVE_STATE 1
+#define PPID_SHOULD_BE_RUNNING 1
+#define USE_APPS_GROUPS_CONF 1
+#define INCREMENTAL_DATA_COLLECTION 1
+#define CPU_TO_NANOSECONDCORES (NSEC_PER_SEC / system_hz)
+#define OS_FUNCTION(func) OS_FUNC_CONCAT(func, _linux)
+
+extern int max_fds_cache_seconds;
+
+#else
+#error "Unsupported operating system"
+#endif
+
+#if (PROCESSES_HAVE_UID == 1) && (PROCESSES_HAVE_SID == 1)
+#error "Do not enable SID and UID at the same time"
#endif
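
Each platform block above declares a capability matrix (the PROCESSES_HAVE_* flags), whether all PIDs can be read instantly, a multiplier for converting collected CPU time into nanosecond-cores, and an OS_FUNCTION() macro that appends a platform suffix to a function name via token pasting, so shared code can call OS_FUNCTION(apps_os_init)() and get apps_os_init_linux(), apps_os_init_freebsd(), and so on. A minimal sketch of that token-pasting dispatch follows; the _demo suffix and the demo function are assumptions for this illustration only, the header maps real suffixes per OS.

#include <stdio.h>

#define OS_FUNC_CONCAT(a, b) a##b
#define OS_FUNCTION(func) OS_FUNC_CONCAT(func, _demo)  /* the header picks _linux/_freebsd/_macos/_windows */

static void apps_os_init_demo(void) { puts("per-OS init called"); }

int main(void) {
    OS_FUNCTION(apps_os_init)();   /* expands to apps_os_init_demo() */
    return 0;
}
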
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
+
+#define MAX_SYSTEM_FD_TO_ALLOW_FILES_PROCESSING 100000
+
+extern pid_t INIT_PID;
extern bool debug_enabled;
-extern bool enable_guest_charts;
+
extern bool enable_detailed_uptime_charts;
extern bool enable_users_charts;
extern bool enable_groups_charts;
extern bool include_exited_childs;
extern bool enable_function_cmdline;
extern bool proc_pid_cmdline_is_needed;
-extern bool enable_file_charts;
+extern int enable_file_charts;
+extern bool obsolete_file_charts;
extern size_t
global_iterations_counter,
@@ -68,19 +180,19 @@ extern size_t
inodes_changed_counter,
links_changed_counter,
targets_assignment_counter,
- all_pids_count,
apps_groups_targets_count;
-extern int
- all_files_len,
- all_files_size,
- show_guest_time,
- show_guest_time_old;
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+extern bool enable_guest_charts;
+extern bool show_guest_time;
+#endif
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
extern kernel_uint_t
global_utime,
global_stime,
global_gtime;
+#endif
// the normalization ratios, as calculated by normalize_utilization()
extern NETDATA_DOUBLE
@@ -95,35 +207,26 @@ extern NETDATA_DOUBLE
cminflt_fix_ratio,
cmajflt_fix_ratio;
-#if defined(__FreeBSD__) || defined(__APPLE__)
-extern usec_t system_current_time_ut;
-#else
-extern kernel_uint_t system_uptime_secs;
-#endif
-
extern size_t pagesize;
-// ----------------------------------------------------------------------------
+extern netdata_mutex_t apps_and_stdout_mutex;
+
+// --------------------------------------------------------------------------------------------------------------------
// string lengths
-#define MAX_COMPARE_NAME 100
-#define MAX_NAME 100
#define MAX_CMDLINE 65536
-// ----------------------------------------------------------------------------
-// to avoid reallocating too frequently, we can increase the number of spare
-// file descriptors used by processes.
-// IMPORTANT:
-// having a lot of spares, increases the CPU utilization of the plugin.
-#define MAX_SPARE_FDS 1
+// --------------------------------------------------------------------------------------------------------------------
+// to avoid reallocating too frequently when we add file descriptors,
+// we double the allocation at every increase request.
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-extern int max_fds_cache_seconds;
-#endif
+static inline uint32_t fds_new_size(uint32_t old_size, uint32_t new_fd) {
+    return MAX(old_size * 2, new_fd + 1); // grow to at least new_fd + 1, so there is always a spare slot
+}
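
The growth policy above doubles the current allocation but never returns less than new_fd + 1, so repeated insertions cost amortised constant time per descriptor. A small sketch of how the sizes evolve, assuming a local MAX macro (in the plugin it comes from libnetdata):

#include <stdio.h>
#include <stdint.h>

#define MAX(a, b) (((a) > (b)) ? (a) : (b))

static inline uint32_t fds_new_size(uint32_t old_size, uint32_t new_fd) {
    return MAX(old_size * 2, new_fd + 1);
}

int main(void) {
    uint32_t size = 0;
    size = fds_new_size(size, 3);    // -> 4   (new_fd + 1 wins)
    size = fds_new_size(size, 10);   // -> 11  (new_fd + 1 wins)
    size = fds_new_size(size, 12);   // -> 22  (doubling wins)
    printf("%u\n", size);
    return 0;
}
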
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// some variables for keeping track of processes count by states
-
+#if (PROCESSES_HAVE_STATE == 1)
typedef enum {
PROC_STATUS_RUNNING = 0,
PROC_STATUS_SLEEPING_D, // uninterruptible sleep
@@ -135,8 +238,9 @@ typedef enum {
extern proc_state proc_state_count[PROC_STATUS_END];
extern const char *proc_states[];
+#endif
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// the rates we are going to send to netdata will have this detail a value of:
// - 1 will send just integer parts to netdata
// - 100 will send 2 decimal points
@@ -144,6 +248,7 @@ extern const char *proc_states[];
// etc.
#define RATES_DETAIL 10000ULL
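
In other words, RATES_DETAIL is a fixed-point scale: with 10000 the plugin transports four decimal digits of a per-second rate as an integer, and the chart side divides by the same constant to recover the value, as the comment above implies. A tiny arithmetic sketch, for illustration only:

#include <stdio.h>

#define RATES_DETAIL 10000ULL

int main(void) {
    // a measured rate of 1.2345 events/sec becomes an integer once scaled
    unsigned long long scaled = (unsigned long long)(1.2345 * RATES_DETAIL); // 12345
    // dividing by RATES_DETAIL on the receiving side restores 1.2345
    printf("scaled=%llu restored=%.4f\n", scaled, (double)scaled / RATES_DETAIL);
    return 0;
}
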
+#if (PROCESSES_HAVE_FDS == 1)
struct openfds {
kernel_uint_t files;
kernel_uint_t pipes;
@@ -155,10 +260,10 @@ struct openfds {
kernel_uint_t eventpolls;
kernel_uint_t other;
};
-
#define pid_openfds_sum(p) ((p)->openfds.files + (p)->openfds.pipes + (p)->openfds.sockets + (p)->openfds.inotifies + (p)->openfds.eventfds + (p)->openfds.timerfds + (p)->openfds.signalfds + (p)->openfds.eventpolls + (p)->openfds.other)
+#endif
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// target
//
// target is the structure that processes are aggregated to be reported
@@ -172,69 +277,139 @@ struct pid_on_target {
struct pid_on_target *next;
};
-struct target {
- char compare[MAX_COMPARE_NAME + 1];
- uint32_t comparehash;
- size_t comparelen;
+typedef enum __attribute__((packed)) {
+ TARGET_TYPE_APP_GROUP = 1,
+#if (PROCESSES_HAVE_UID == 1)
+ TARGET_TYPE_UID,
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ TARGET_TYPE_GID,
+#endif
+#if (PROCESSES_HAVE_SID == 1)
+ TARGET_TYPE_SID,
+#endif
+ TARGET_TYPE_TREE,
+} TARGET_TYPE;
- char id[MAX_NAME + 1];
- uint32_t idhash;
+typedef enum __attribute__((packed)) {
+ // CPU utilization time
+ // The values are expressed in "NANOSECONDCORES".
+ // 1 x "NANOSECONDCORE" = 1 x NSEC_PER_SEC (1 billion).
+ PDF_UTIME, // CPU user time
+ PDF_STIME, // CPU system time
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+ PDF_GTIME, // CPU guest time
+#endif
+#if (PROCESSES_HAVE_CPU_CHILDREN_TIME == 1)
+ PDF_CUTIME, // exited children CPU user time
+ PDF_CSTIME, // exited children CPU system time
+#if (PROCESSES_HAVE_CPU_GUEST_TIME == 1)
+ PDF_CGTIME, // exited children CPU guest time
+#endif
+#endif
- char name[MAX_NAME + 1];
- char clean_name[MAX_NAME + 1]; // sanitized name used in chart id (need to replace at least dots)
- uid_t uid;
- gid_t gid;
+ PDF_MINFLT, // rate, unit: faults * RATES_DETAIL
- bool is_other;
-
- kernel_uint_t minflt;
- kernel_uint_t cminflt;
- kernel_uint_t majflt;
- kernel_uint_t cmajflt;
- kernel_uint_t utime;
- kernel_uint_t stime;
- kernel_uint_t gtime;
- kernel_uint_t cutime;
- kernel_uint_t cstime;
- kernel_uint_t cgtime;
- kernel_uint_t num_threads;
- // kernel_uint_t rss;
-
- kernel_uint_t status_vmsize;
- kernel_uint_t status_vmrss;
- kernel_uint_t status_vmshared;
- kernel_uint_t status_rssfile;
- kernel_uint_t status_rssshmem;
- kernel_uint_t status_vmswap;
- kernel_uint_t status_voluntary_ctxt_switches;
- kernel_uint_t status_nonvoluntary_ctxt_switches;
-
- kernel_uint_t io_logical_bytes_read;
- kernel_uint_t io_logical_bytes_written;
- kernel_uint_t io_read_calls;
- kernel_uint_t io_write_calls;
- kernel_uint_t io_storage_bytes_read;
- kernel_uint_t io_storage_bytes_written;
- kernel_uint_t io_cancelled_write_bytes;
+#if (PROCESSES_HAVE_MAJFLT == 1)
+ PDF_MAJFLT, // rate, unit: faults * RATES_DETAIL
+#endif
- int *target_fds;
- int target_fds_size;
+#if (PROCESSES_HAVE_CHILDREN_FLTS == 1)
+ PDF_CMINFLT, // rate, unit: faults * RATES_DETAIL
+ PDF_CMAJFLT, // rate, unit: faults * RATES_DETAIL
+#endif
- struct openfds openfds;
+ PDF_VMSIZE, // the current virtual memory used by the process, in bytes
+ PDF_VMRSS, // the resident memory used by the process, in bytes
- NETDATA_DOUBLE max_open_files_percent;
+#if (PROCESSES_HAVE_VMSHARED == 1)
+ PDF_VMSHARED, // the shared memory used by the process, in bytes
+#endif
+
+#if (PROCESSES_HAVE_RSSFILE == 1)
+ PDF_RSSFILE, // unit: bytes
+#endif
+
+#if (PROCESSES_HAVE_RSSSHMEM == 1)
+ PDF_RSSSHMEM, // unit: bytes
+#endif
+
+#if (PROCESSES_HAVE_VMSWAP == 1)
+ PDF_VMSWAP, // the swap memory used by the process, in bytes
+#endif
+
+#if (PROCESSES_HAVE_VOLCTX == 1)
+ PDF_VOLCTX, // rate, unit: switches * RATES_DETAIL
+#endif
+
+#if (PROCESSES_HAVE_NVOLCTX == 1)
+ PDF_NVOLCTX, // rate, unit: switches * RATES_DETAIL
+#endif
+
+#if (PROCESSES_HAVE_LOGICAL_IO == 1)
+ PDF_LREAD, // rate, logical reads in bytes/sec * RATES_DETAIL
+ PDF_LWRITE, // rate, logical writes in bytes/sec * RATES_DETAIL
+#endif
+
+#if (PROCESSES_HAVE_PHYSICAL_IO == 1)
+ PDF_PREAD, // rate, physical reads in bytes/sec * RATES_DETAIL
+ PDF_PWRITE, // rate, physical writes in bytes/sec * RATES_DETAIL
+#endif
+
+#if (PROCESSES_HAVE_IO_CALLS == 1)
+ PDF_OREAD, // rate, read ops/sec * RATES_DETAIL
+ PDF_OWRITE, // rate, write ops/sec * RATES_DETAIL
+#endif
+
+ PDF_UPTIME, // the process uptime in seconds
+ PDF_THREADS, // the number of threads
+ PDF_PROCESSES, // the number of processes
+
+#if (PROCESSES_HAVE_HANDLES == 1)
+ PDF_HANDLES, // the number of handles the process maintains
+#endif
+
+ // terminator
+ PDF_MAX
+} PID_FIELD;
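
With PID_FIELD, the per-metric struct members of the old pid_stat and target are collapsed into enum-indexed arrays (raw[PDF_MAX] and values[PDF_MAX] further below), so aggregation becomes a single loop instead of one addition per field. A trimmed, self-contained sketch of the pattern; the two demo structs and the aggregate() helper are assumptions for illustration, not the plugin's real aggregation code:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kernel_uint_t;

typedef enum { PDF_UTIME, PDF_STIME, PDF_MAX } PID_FIELD;   /* trimmed list */

struct demo_pid    { kernel_uint_t values[PDF_MAX]; };
struct demo_target { kernel_uint_t values[PDF_MAX]; };

static void aggregate(struct demo_target *t, const struct demo_pid *p) {
    for(int f = 0; f < PDF_MAX; f++)
        t->values[f] += p->values[f];   // one loop replaces per-field additions
}

int main(void) {
    struct demo_pid p = { .values = { [PDF_UTIME] = 100, [PDF_STIME] = 40 } };
    struct demo_target t = { { 0 } };
    aggregate(&t, &p);
    printf("%llu %llu\n", (unsigned long long)t.values[PDF_UTIME],
                          (unsigned long long)t.values[PDF_STIME]);
    return 0;
}
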
+
+typedef struct apps_match {
+ bool starts_with:1;
+ bool ends_with:1;
+ STRING *compare;
+ SIMPLE_PATTERN *pattern;
+} APPS_MATCH;
+
+struct target {
+ STRING *id;
+ STRING *name;
+ STRING *clean_name;
+
+ TARGET_TYPE type;
+ APPS_MATCH match;
+#if (PROCESSES_HAVE_UID == 1)
+ uid_t uid;
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ gid_t gid;
+#endif
+#if (PROCESSES_HAVE_SID == 1)
+ STRING *sid_name;
+#endif
+
+ kernel_uint_t values[PDF_MAX];
kernel_uint_t uptime_min;
- kernel_uint_t uptime_sum;
kernel_uint_t uptime_max;
- unsigned int processes; // how many processes have been merged to this
- int exposed; // if set, we have sent this to netdata
- int hidden; // if set, we set the hidden flag on the dimension
- int debug_enabled;
- int ends_with;
- int starts_with; // if set, the compare string matches only the
- // beginning of the command
+#if (PROCESSES_HAVE_FDS == 1)
+ struct openfds openfds;
+ NETDATA_DOUBLE max_open_files_percent;
+ int *target_fds;
+ uint32_t target_fds_size;
+#endif
+
+ bool exposed:1; // if set, we have sent this to netdata
struct pid_on_target *root_pid; // list of aggregated pids for target debugging
@@ -242,7 +417,7 @@ struct target {
struct target *next;
};
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// internal flags
// handled in code (automatically set)
@@ -258,12 +433,13 @@ typedef enum __attribute__((packed)) {
PID_LOG_LIMITS_DETAIL = (1 << 6),
} PID_LOG;
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// pid_stat
//
// structure to store data for each process running
// see: man proc for the description of the fields
+#if (PROCESSES_HAVE_PID_LIMITS == 1)
struct pid_limits {
// kernel_uint_t max_cpu_time;
// kernel_uint_t max_file_size;
@@ -282,11 +458,12 @@ struct pid_limits {
// kernel_uint_t max_realtime_priority;
// kernel_uint_t max_realtime_timeout;
};
+#endif
struct pid_fd {
int fd;
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
+#if defined(OS_LINUX)
ino_t inode;
char *filename;
uint32_t link_hash;
@@ -295,6 +472,10 @@ struct pid_fd {
#endif
};
+#define pid_stat_comm(p) (string2str((p)->comm))
+#define pid_stat_cmdline(p) (string2str((p)->cmdline))
+uint32_t all_files_len_get(void);
+
struct pid_stat {
int32_t pid;
int32_t ppid;
@@ -304,122 +485,86 @@ struct pid_stat {
// int32_t tpgid;
// uint64_t flags;
- char state;
-
- char comm[MAX_COMPARE_NAME + 1];
- char *cmdline;
-
- // these are raw values collected
- kernel_uint_t minflt_raw;
- kernel_uint_t cminflt_raw;
- kernel_uint_t majflt_raw;
- kernel_uint_t cmajflt_raw;
- kernel_uint_t utime_raw;
- kernel_uint_t stime_raw;
- kernel_uint_t gtime_raw; // guest_time
- kernel_uint_t cutime_raw;
- kernel_uint_t cstime_raw;
- kernel_uint_t cgtime_raw; // cguest_time
-
- // these are rates
- kernel_uint_t minflt;
- kernel_uint_t cminflt;
- kernel_uint_t majflt;
- kernel_uint_t cmajflt;
- kernel_uint_t utime;
- kernel_uint_t stime;
- kernel_uint_t gtime;
- kernel_uint_t cutime;
- kernel_uint_t cstime;
- kernel_uint_t cgtime;
-
- // int64_t priority;
- // int64_t nice;
- int32_t num_threads;
- // int64_t itrealvalue;
- // kernel_uint_t collected_starttime;
- // kernel_uint_t vsize;
- // kernel_uint_t rss;
- // kernel_uint_t rsslim;
- // kernel_uint_t starcode;
- // kernel_uint_t endcode;
- // kernel_uint_t startstack;
- // kernel_uint_t kstkesp;
- // kernel_uint_t kstkeip;
- // uint64_t signal;
- // uint64_t blocked;
- // uint64_t sigignore;
- // uint64_t sigcatch;
- // uint64_t wchan;
- // uint64_t nswap;
- // uint64_t cnswap;
- // int32_t exit_signal;
- // int32_t processor;
- // uint32_t rt_priority;
- // uint32_t policy;
- // kernel_uint_t delayacct_blkio_ticks;
+ struct pid_stat *parent;
+ struct pid_stat *next;
+ struct pid_stat *prev;
- uid_t uid;
- gid_t gid;
+ struct target *target; // app_groups.conf/tree targets
- kernel_uint_t status_voluntary_ctxt_switches_raw;
- kernel_uint_t status_nonvoluntary_ctxt_switches_raw;
-
- kernel_uint_t status_vmsize;
- kernel_uint_t status_vmrss;
- kernel_uint_t status_vmshared;
- kernel_uint_t status_rssfile;
- kernel_uint_t status_rssshmem;
- kernel_uint_t status_vmswap;
- kernel_uint_t status_voluntary_ctxt_switches;
- kernel_uint_t status_nonvoluntary_ctxt_switches;
-#ifndef __FreeBSD__
- ARL_BASE *status_arl;
+#if (PROCESSES_HAVE_UID == 1)
+ struct target *uid_target; // uid based targets
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ struct target *gid_target; // gid based targets
+#endif
+#if (PROCESSES_HAVE_SID == 1)
+ struct target *sid_target; // sid based targets
#endif
- kernel_uint_t io_logical_bytes_read_raw;
- kernel_uint_t io_logical_bytes_written_raw;
- kernel_uint_t io_read_calls_raw;
- kernel_uint_t io_write_calls_raw;
- kernel_uint_t io_storage_bytes_read_raw;
- kernel_uint_t io_storage_bytes_written_raw;
- kernel_uint_t io_cancelled_write_bytes_raw;
+ STRING *comm_orig; // the command, as-collected
+ STRING *comm; // the command, sanitized
+ STRING *name; // the command name, if any, sanitized
+ STRING *cmdline; // the full command line of the program
- kernel_uint_t io_logical_bytes_read;
- kernel_uint_t io_logical_bytes_written;
- kernel_uint_t io_read_calls;
- kernel_uint_t io_write_calls;
- kernel_uint_t io_storage_bytes_read;
- kernel_uint_t io_storage_bytes_written;
- kernel_uint_t io_cancelled_write_bytes;
+#if defined(OS_WINDOWS)
+ COUNTER_DATA perflib[PDF_MAX];
+#else
+ kernel_uint_t raw[PDF_MAX];
+#endif
- kernel_uint_t uptime;
+ kernel_uint_t values[PDF_MAX];
- struct pid_fd *fds; // array of fds it uses
- size_t fds_size; // the size of the fds array
+#if (PROCESSES_HAVE_UID == 1)
+ uid_t uid;
+#endif
+#if (PROCESSES_HAVE_GID == 1)
+ gid_t gid;
+#endif
+#if (PROCESSES_HAVE_SID == 1)
+ STRING *sid_name;
+#endif
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+ uint32_t sortlist; // higher numbers = top on the process tree
+ // each process gets a unique number (non-sequential though)
+#endif
+#if (PROCESSES_HAVE_FDS == 1)
struct openfds openfds;
+#if (PROCESSES_HAVE_PID_LIMITS == 1)
struct pid_limits limits;
-
NETDATA_DOUBLE openfds_limits_percent;
+#endif
+ struct pid_fd *fds; // array of fds it uses
+ uint32_t fds_size; // the size of the fds array
+#endif
- int sortlist; // higher numbers = top on the process tree
- // each process gets a unique number
+ uint32_t children_count; // the number of processes directly referencing this.
+ // used internally for apps_groups.conf inheritance.
+ // don't rely on it for anything else.
- int children_count; // number of processes directly referencing this
- int keeploops; // increases by 1 every time keep is 1 and updated 0
+ uint32_t keeploops; // increases by 1 every time keep is 1 and updated 0
PID_LOG log_thrown;
- bool keep; // true when we need to keep this process in memory even after it exited
- bool updated; // true when the process is currently running
- bool merged; // true when it has been merged to its parent
- bool read; // true when we have already read this process for this iteration
- bool matched_by_config;
+ bool read:1; // true when we have already read this process for this iteration
+ bool updated:1; // true when the process is currently running
+ bool merged:1; // true when it has been merged to its parent
+ bool keep:1; // true when we need to keep this process in memory even after it exited
+ bool is_manager:1; // true when this pid is a process manager
+ bool is_aggregator:1; // true when this pid is a process aggregator
+
+ bool matched_by_config:1;
+
+#if (PROCESSES_HAVE_STATE == 1)
+ char state;
+#endif
- struct target *target; // app_groups.conf targets
- struct target *user_target; // uid based targets
- struct target *group_target; // gid based targets
+#if defined(OS_WINDOWS)
+ bool got_info:1;
+ bool got_service:1;
+ bool initialized:1;
+#endif
usec_t stat_collected_usec;
usec_t last_stat_collected_usec;
@@ -428,70 +573,22 @@ struct pid_stat {
usec_t last_io_collected_usec;
usec_t last_limits_collected_usec;
+#if defined(OS_LINUX)
+ ARL_BASE *status_arl;
char *fds_dirname; // the full directory name in /proc/PID/fd
-
char *stat_filename;
char *status_filename;
char *io_filename;
char *cmdline_filename;
char *limits_filename;
-
- struct pid_stat *parent;
- struct pid_stat *prev;
- struct pid_stat *next;
-};
-
-// ----------------------------------------------------------------------------
-
-struct user_or_group_id {
- avl_t avl;
-
- union {
- uid_t uid;
- gid_t gid;
- } id;
-
- char *name;
-
- int updated;
-
- struct user_or_group_id * next;
+#endif
};
-extern struct target
- *apps_groups_default_target,
- *apps_groups_root_target,
- *users_root_target,
- *groups_root_target;
-
-extern struct pid_stat *root_of_pids;
+// --------------------------------------------------------------------------------------------------------------------
extern int update_every;
-extern unsigned int time_factor;
-extern kernel_uint_t MemTotal;
-
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
-extern pid_t *all_pids_sortlist;
-#endif
-
-#define APPS_PLUGIN_PROCESSES_FUNCTION_DESCRIPTION "Detailed information on the currently running processes."
-
-void function_processes(const char *transaction, char *function,
- usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused,
- BUFFER *payload __maybe_unused, HTTP_ACCESS access,
- const char *source __maybe_unused, void *data __maybe_unused);
-
-struct target *find_target_by_name(struct target *base, const char *name);
-
-struct target *get_users_target(uid_t uid);
-struct target *get_groups_target(gid_t gid);
-int read_apps_groups_conf(const char *path, const char *file);
-
-void users_and_groups_init(void);
-struct user_or_group_id *user_id_find(struct user_or_group_id *user_id_to_find);
-struct user_or_group_id *group_id_find(struct user_or_group_id *group_id_to_find);
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// debugging
static inline void debug_log_int(const char *fmt, ... ) {
@@ -515,46 +612,152 @@ static inline void debug_log_dummy(void) {}
#define debug_log(fmt, args...) debug_log_dummy()
#endif
-int managed_log(struct pid_stat *p, PID_LOG log, int status);
+bool managed_log(struct pid_stat *p, PID_LOG log, bool status);
+void sanitize_apps_plugin_chart_meta(char *buf);
-// ----------------------------------------------------------------------------
+// --------------------------------------------------------------------------------------------------------------------
// macro to calculate the incremental rate of a value
// each parameter is accessed only ONCE - so it is safe to pass function calls
// or other macros as parameters
-#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec) do { \
+#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec, multiplier) do { \
kernel_uint_t _new_tmp = new_kernel_value; \
- (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * RATES_DETAIL) / ((collected_usec) - (last_collected_usec)); \
+ (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * (multiplier)) / ((collected_usec) - (last_collected_usec)); \
(last_kernel_variable) = _new_tmp; \
} while(0)
// the same macro for struct pid members
-#define pid_incremental_rate(type, var, value) \
- incremental_rate(var, var##_raw, value, p->type##_collected_usec, p->last_##type##_collected_usec)
+#define pid_incremental_rate(type, idx, value) \
+ incremental_rate(p->values[idx], p->raw[idx], value, p->type##_collected_usec, p->last_##type##_collected_usec, RATES_DETAIL)
-int read_proc_pid_stat(struct pid_stat *p, void *ptr);
-int read_proc_pid_limits(struct pid_stat *p, void *ptr);
-int read_proc_pid_status(struct pid_stat *p, void *ptr);
-int read_proc_pid_cmdline(struct pid_stat *p);
-int read_proc_pid_io(struct pid_stat *p, void *ptr);
-int read_pid_file_descriptors(struct pid_stat *p, void *ptr);
-int read_global_time(void);
-void get_MemTotal(void);
+#define pid_incremental_cpu(type, idx, value) \
+ incremental_rate(p->values[idx], p->raw[idx], value, p->type##_collected_usec, p->last_##type##_collected_usec, CPU_TO_NANOSECONDCORES)
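
The macro converts two successive raw counter readings into a per-second rate scaled by the given multiplier (RATES_DETAIL for ordinary counters, CPU_TO_NANOSECONDCORES for CPU times), and updates the stored last value in place. A self-contained usage sketch of the macro as defined above; the sample numbers are illustrative only:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t kernel_uint_t;
typedef uint64_t usec_t;
#define USEC_PER_SEC 1000000ULL
#define RATES_DETAIL 10000ULL

#define incremental_rate(rate_variable, last_kernel_variable, new_kernel_value, collected_usec, last_collected_usec, multiplier) do { \
    kernel_uint_t _new_tmp = new_kernel_value; \
    (rate_variable) = (_new_tmp - (last_kernel_variable)) * (USEC_PER_SEC * (multiplier)) / ((collected_usec) - (last_collected_usec)); \
    (last_kernel_variable) = _new_tmp; \
} while(0)

int main(void) {
    kernel_uint_t last = 1000, rate = 0;
    usec_t last_t = 0, now_t = 2 * USEC_PER_SEC;   // two seconds apart

    // counter went from 1000 to 1500 in 2s -> 250/s, scaled by RATES_DETAIL
    incremental_rate(rate, last, 1500, now_t, last_t, RATES_DETAIL);
    printf("rate=%llu (expected 2500000), last=%llu\n",
           (unsigned long long)rate, (unsigned long long)last);
    return 0;
}
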
-bool collect_data_for_all_pids(void);
-void cleanup_exited_pids(void);
+void apps_managers_and_aggregators_init(void);
+void apps_pids_init(void);
+
+#if (PROCESSES_HAVE_CMDLINE == 1)
+int read_proc_pid_cmdline(struct pid_stat *p);
+#endif
+#if (PROCESSES_HAVE_FDS == 1)
void clear_pid_fd(struct pid_fd *pfd);
void file_descriptor_not_used(int id);
void init_pid_fds(struct pid_stat *p, size_t first, size_t size);
void aggregate_pid_fds_on_targets(struct pid_stat *p);
+int read_pid_file_descriptors(struct pid_stat *p, void *ptr);
+void make_all_pid_fds_negative(struct pid_stat *p);
+uint32_t file_descriptor_find_or_add(const char *name, uint32_t hash);
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+// data collection management
+
+bool pid_match_check(struct pid_stat *p, APPS_MATCH *match);
+APPS_MATCH pid_match_create(const char *comm);
+void pid_match_cleanup(APPS_MATCH *m);
+
+bool collect_data_for_all_pids(void);
+
+void pid_collection_started(struct pid_stat *p);
+void pid_collection_failed(struct pid_stat *p);
+void pid_collection_completed(struct pid_stat *p);
+
+#if (INCREMENTAL_DATA_COLLECTION == 1)
+bool collect_parents_before_children(void);
+int incrementally_collect_data_for_pid(pid_t pid, void *ptr);
+int incrementally_collect_data_for_pid_stat(struct pid_stat *p, void *ptr);
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+// pid management
+
+struct pid_stat *root_of_pids(void);
+size_t all_pids_count(void);
+
+struct pid_stat *get_or_allocate_pid_entry(pid_t pid);
+struct pid_stat *find_pid_entry(pid_t pid);
+void del_pid_entry(pid_t pid);
+void update_pid_comm(struct pid_stat *p, const char *comm);
+void update_pid_cmdline(struct pid_stat *p, const char *cmdline);
+
+bool is_process_a_manager(struct pid_stat *p);
+bool is_process_an_aggregator(struct pid_stat *p);
+bool is_process_an_interpreter(struct pid_stat *p);
+
+// --------------------------------------------------------------------------------------------------------------------
+// targets management
+
+struct target *find_target_by_name(struct target *base, const char *name);
+struct target *get_tree_target(struct pid_stat *p);
+
+void aggregate_processes_to_targets(void);
+
+#if (PROCESSES_HAVE_UID == 1)
+extern struct target *users_root_target;
+struct target *get_uid_target(uid_t uid);
+#endif
+
+#if (PROCESSES_HAVE_GID == 1)
+extern struct target *groups_root_target;
+struct target *get_gid_target(gid_t gid);
+#endif
+
+#if (PROCESSES_HAVE_SID == 1)
+extern struct target *sids_root_target;
+struct target *get_sid_target(STRING *sid_name);
+#endif
+
+extern struct target *apps_groups_root_target;
+int read_apps_groups_conf(const char *path, const char *file);
+
+// --------------------------------------------------------------------------------------------------------------------
+// output
-void send_proc_states_count(usec_t dt);
void send_charts_updates_to_netdata(struct target *root, const char *type, const char *lbl_name, const char *title);
void send_collected_data_to_netdata(struct target *root, const char *type, usec_t dt);
void send_resource_usage_to_netdata(usec_t dt);
-void pids_init(void);
-struct pid_stat *find_pid_entry(pid_t pid);
+#if (PROCESSES_HAVE_STATE == 1)
+void send_proc_states_count(usec_t dt);
+#endif
+
+#define APPS_PLUGIN_PROCESSES_FUNCTION_DESCRIPTION "Detailed information on the currently running processes."
+void function_processes(const char *transaction, char *function,
+ usec_t *stop_monotonic_ut __maybe_unused, bool *cancelled __maybe_unused,
+ BUFFER *payload __maybe_unused, HTTP_ACCESS access,
+ const char *source __maybe_unused, void *data __maybe_unused);
+
+// --------------------------------------------------------------------------------------------------------------------
+// operating system functions
+
+// one time initialization per operating system
+void OS_FUNCTION(apps_os_init)(void);
+
+// collect all the available information for all processes running
+bool OS_FUNCTION(apps_os_collect_all_pids)(void);
+
+bool OS_FUNCTION(apps_os_read_pid_status)(struct pid_stat *p, void *ptr);
+bool OS_FUNCTION(apps_os_read_pid_stat)(struct pid_stat *p, void *ptr);
+bool OS_FUNCTION(apps_os_read_pid_io)(struct pid_stat *p, void *ptr);
+
+#if (PROCESSES_HAVE_PID_LIMITS == 1)
+bool OS_FUNCTION(apps_os_read_pid_limits)(struct pid_stat *p, void *ptr);
+#endif
+
+#if (PROCESSES_HAVE_CMDLINE == 1)
+bool OS_FUNCTION(apps_os_get_pid_cmdline)(struct pid_stat *p, char *cmdline, size_t bytes);
+#endif
+
+#if (PROCESSES_HAVE_FDS == 1)
+bool OS_FUNCTION(apps_os_read_pid_fds)(struct pid_stat *p, void *ptr);
+#endif
+
+#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
+bool OS_FUNCTION(apps_os_read_global_cpu_utilization)(void);
+#endif
+
+// return the total physical memory of the system, in bytes
+uint64_t OS_FUNCTION(apps_os_get_total_memory)(void);
#endif //NETDATA_APPS_PLUGIN_H
diff --git a/src/collectors/apps.plugin/apps_proc_meminfo.c b/src/collectors/apps.plugin/apps_proc_meminfo.c
deleted file mode 100644
index a7227c213..000000000
--- a/src/collectors/apps.plugin/apps_proc_meminfo.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-kernel_uint_t MemTotal = 0;
-
-#ifdef __FreeBSD__
-static inline bool get_MemTotal_per_os(void) {
- int mib[2] = {CTL_HW, HW_PHYSMEM};
- size_t size = sizeof(MemTotal);
- if (sysctl(mib, 2, &MemTotal, &size, NULL, 0) == -1) {
- netdata_log_error("Failed to get total memory using sysctl");
- return false;
- }
- // FreeBSD returns bytes; convert to kB
- MemTotal /= 1024;
- return true;
-}
-#endif // __FreeBSD__
-
-#ifdef __APPLE__
-static inline bool get_MemTotal_per_os(void) {
- int mib[2] = {CTL_HW, HW_MEMSIZE};
- size_t size = sizeof(MemTotal);
- if (sysctl(mib, 2, &MemTotal, &size, NULL, 0) == -1) {
- netdata_log_error("Failed to get total memory using sysctl");
- return false;
- }
- // MacOS returns bytes; convert to kB
- MemTotal /= 1024;
- return true;
-}
-#endif // __APPLE__
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-static inline bool get_MemTotal_per_os(void) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/meminfo", netdata_configured_host_prefix);
-
- procfile *ff = procfile_open(filename, ": \t", PROCFILE_FLAG_DEFAULT);
- if(!ff)
- return false;
-
- ff = procfile_readall(ff);
- if(!ff)
- return false;
-
- size_t line, lines = procfile_lines(ff);
-
- for(line = 0; line < lines ;line++) {
- size_t words = procfile_linewords(ff, line);
- if(words == 3 && strcmp(procfile_lineword(ff, line, 0), "MemTotal") == 0 && strcmp(procfile_lineword(ff, line, 2), "kB") == 0) {
- kernel_uint_t n = str2ull(procfile_lineword(ff, line, 1), NULL);
- if(n) MemTotal = n;
- break;
- }
- }
-
- procfile_close(ff);
-
- return true;
-}
-#endif
-
-void get_MemTotal(void) {
- if(!get_MemTotal_per_os())
- MemTotal = 0;
-}
diff --git a/src/collectors/apps.plugin/apps_proc_pid_cmdline.c b/src/collectors/apps.plugin/apps_proc_pid_cmdline.c
deleted file mode 100644
index 75a60fa3a..000000000
--- a/src/collectors/apps.plugin/apps_proc_pid_cmdline.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-#ifdef __APPLE__
-bool get_cmdline_per_os(struct pid_stat *p, char *cmdline, size_t maxBytes) {
- int mib[3] = {CTL_KERN, KERN_PROCARGS2, p->pid};
- static char *args = NULL;
- static size_t size = 0;
-
- size_t new_size;
- if (sysctl(mib, 3, NULL, &new_size, NULL, 0) == -1) {
- return false;
- }
-
- if (new_size > size) {
- if (args)
- freez(args);
-
- args = (char *)mallocz(new_size);
- size = new_size;
- }
-
- memset(cmdline, 0, new_size < maxBytes ? new_size : maxBytes);
-
- size_t used_size = size;
- if (sysctl(mib, 3, args, &used_size, NULL, 0) == -1)
- return false;
-
- int argc;
- memcpy(&argc, args, sizeof(argc));
- char *ptr = args + sizeof(argc);
- used_size -= sizeof(argc);
-
- // Skip the executable path
- while (*ptr && used_size > 0) {
- ptr++;
- used_size--;
- }
-
- // Copy only the arguments to the cmdline buffer, skipping the environment variables
- size_t i = 0, copied_args = 0;
- bool inArg = false;
- for (; used_size > 0 && i < maxBytes - 1 && copied_args < argc; --used_size, ++ptr) {
- if (*ptr == '\0') {
- if (inArg) {
- cmdline[i++] = ' '; // Replace nulls between arguments with spaces
- inArg = false;
- copied_args++;
- }
- } else {
- cmdline[i++] = *ptr;
- inArg = true;
- }
- }
-
- if (i > 0 && cmdline[i - 1] == ' ')
- i--; // Remove the trailing space if present
-
- cmdline[i] = '\0'; // Null-terminate the string
-
- return true;
-}
-#endif // __APPLE__
-
-#if defined(__FreeBSD__)
-static inline bool get_cmdline_per_os(struct pid_stat *p, char *cmdline, size_t bytes) {
- size_t i, b = bytes - 1;
- int mib[4];
-
- mib[0] = CTL_KERN;
- mib[1] = KERN_PROC;
- mib[2] = KERN_PROC_ARGS;
- mib[3] = p->pid;
- if (unlikely(sysctl(mib, 4, cmdline, &b, NULL, 0)))
- return false;
-
- cmdline[b] = '\0';
- for(i = 0; i < b ; i++)
- if(unlikely(!cmdline[i])) cmdline[i] = ' ';
-
- return true;
-}
-#endif // __FreeBSD__
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-static inline bool get_cmdline_per_os(struct pid_stat *p, char *cmdline, size_t bytes) {
- if(unlikely(!p->cmdline_filename)) {
- char filename[FILENAME_MAX];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
- p->cmdline_filename = strdupz(filename);
- }
-
- int fd = open(p->cmdline_filename, procfile_open_flags, 0666);
- if(unlikely(fd == -1))
- return false;
-
- ssize_t i, b = read(fd, cmdline, bytes - 1);
- close(fd);
-
- if(unlikely(b < 0))
- return false;
-
- cmdline[b] = '\0';
- for(i = 0; i < b ; i++)
- if(unlikely(!cmdline[i])) cmdline[i] = ' ';
-
- return true;
-}
-#endif // !__FreeBSD__ !__APPLE__
-
-int read_proc_pid_cmdline(struct pid_stat *p) {
- static char cmdline[MAX_CMDLINE];
-
- if(unlikely(!get_cmdline_per_os(p, cmdline, sizeof(cmdline))))
- goto cleanup;
-
- if(p->cmdline) freez(p->cmdline);
- p->cmdline = strdupz(cmdline);
-
- debug_log("Read file '%s' contents: %s", p->cmdline_filename, p->cmdline);
-
- return 1;
-
-cleanup:
- // copy the command to the command line
- if(p->cmdline) freez(p->cmdline);
- p->cmdline = strdupz(p->comm);
- return 0;
-}
diff --git a/src/collectors/apps.plugin/apps_proc_pid_fd.c b/src/collectors/apps.plugin/apps_proc_pid_fd.c
deleted file mode 100644
index 519b0794d..000000000
--- a/src/collectors/apps.plugin/apps_proc_pid_fd.c
+++ /dev/null
@@ -1,753 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-// ----------------------------------------------------------------------------
-// file descriptor
-//
-// this is used to keep a global list of all open files of the system.
-// it is needed in order to calculate the unique files processes have open.
-
-#define FILE_DESCRIPTORS_INCREASE_STEP 100
-
-// types for struct file_descriptor->type
-typedef enum fd_filetype {
- FILETYPE_OTHER,
- FILETYPE_FILE,
- FILETYPE_PIPE,
- FILETYPE_SOCKET,
- FILETYPE_INOTIFY,
- FILETYPE_EVENTFD,
- FILETYPE_EVENTPOLL,
- FILETYPE_TIMERFD,
- FILETYPE_SIGNALFD
-} FD_FILETYPE;
-
-struct file_descriptor {
- avl_t avl;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- uint32_t magic;
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- const char *name;
- uint32_t hash;
-
- FD_FILETYPE type;
- int count;
- int pos;
-} *all_files = NULL;
-
-// ----------------------------------------------------------------------------
-
-static inline void reallocate_target_fds(struct target *w) {
- if(unlikely(!w))
- return;
-
- if(unlikely(!w->target_fds || w->target_fds_size < all_files_size)) {
- w->target_fds = reallocz(w->target_fds, sizeof(int) * all_files_size);
- memset(&w->target_fds[w->target_fds_size], 0, sizeof(int) * (all_files_size - w->target_fds_size));
- w->target_fds_size = all_files_size;
- }
-}
-
-static void aggregage_fd_type_on_openfds(FD_FILETYPE type, struct openfds *openfds) {
- switch(type) {
- case FILETYPE_FILE:
- openfds->files++;
- break;
-
- case FILETYPE_PIPE:
- openfds->pipes++;
- break;
-
- case FILETYPE_SOCKET:
- openfds->sockets++;
- break;
-
- case FILETYPE_INOTIFY:
- openfds->inotifies++;
- break;
-
- case FILETYPE_EVENTFD:
- openfds->eventfds++;
- break;
-
- case FILETYPE_TIMERFD:
- openfds->timerfds++;
- break;
-
- case FILETYPE_SIGNALFD:
- openfds->signalfds++;
- break;
-
- case FILETYPE_EVENTPOLL:
- openfds->eventpolls++;
- break;
-
- case FILETYPE_OTHER:
- openfds->other++;
- break;
- }
-}
-
-static inline void aggregate_fd_on_target(int fd, struct target *w) {
- if(unlikely(!w))
- return;
-
- if(unlikely(w->target_fds[fd])) {
- // it is already aggregated
- // just increase its usage counter
- w->target_fds[fd]++;
- return;
- }
-
- // increase its usage counter
- // so that we will not add it again
- w->target_fds[fd]++;
-
- aggregage_fd_type_on_openfds(all_files[fd].type, &w->openfds);
-}
-
-void aggregate_pid_fds_on_targets(struct pid_stat *p) {
-
- if(unlikely(!p->updated)) {
- // the process is not running
- return;
- }
-
- struct target *w = p->target, *u = p->user_target, *g = p->group_target;
-
- reallocate_target_fds(w);
- reallocate_target_fds(u);
- reallocate_target_fds(g);
-
- p->openfds.files = 0;
- p->openfds.pipes = 0;
- p->openfds.sockets = 0;
- p->openfds.inotifies = 0;
- p->openfds.eventfds = 0;
- p->openfds.timerfds = 0;
- p->openfds.signalfds = 0;
- p->openfds.eventpolls = 0;
- p->openfds.other = 0;
-
- long currentfds = 0;
- size_t c, size = p->fds_size;
- struct pid_fd *fds = p->fds;
- for(c = 0; c < size ;c++) {
- int fd = fds[c].fd;
-
- if(likely(fd <= 0 || fd >= all_files_size))
- continue;
-
- currentfds++;
- aggregage_fd_type_on_openfds(all_files[fd].type, &p->openfds);
-
- aggregate_fd_on_target(fd, w);
- aggregate_fd_on_target(fd, u);
- aggregate_fd_on_target(fd, g);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-int file_descriptor_compare(void* a, void* b) {
-#ifdef NETDATA_INTERNAL_CHECKS
- if(((struct file_descriptor *)a)->magic != 0x0BADCAFE || ((struct file_descriptor *)b)->magic != 0x0BADCAFE)
- netdata_log_error("Corrupted index data detected. Please report this.");
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- if(((struct file_descriptor *)a)->hash < ((struct file_descriptor *)b)->hash)
- return -1;
-
- else if(((struct file_descriptor *)a)->hash > ((struct file_descriptor *)b)->hash)
- return 1;
-
- else
- return strcmp(((struct file_descriptor *)a)->name, ((struct file_descriptor *)b)->name);
-}
-
-// int file_descriptor_iterator(avl_t *a) { if(a) {}; return 0; }
-
-avl_tree_type all_files_index = {
- NULL,
- file_descriptor_compare
-};
-
-static struct file_descriptor *file_descriptor_find(const char *name, uint32_t hash) {
- struct file_descriptor tmp;
- tmp.hash = (hash)?hash:simple_hash(name);
- tmp.name = name;
- tmp.count = 0;
- tmp.pos = 0;
-#ifdef NETDATA_INTERNAL_CHECKS
- tmp.magic = 0x0BADCAFE;
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- return (struct file_descriptor *)avl_search(&all_files_index, (avl_t *) &tmp);
-}
-
-#define file_descriptor_add(fd) avl_insert(&all_files_index, (avl_t *)(fd))
-#define file_descriptor_remove(fd) avl_remove(&all_files_index, (avl_t *)(fd))
-
-// ----------------------------------------------------------------------------
-
-void file_descriptor_not_used(int id) {
- if(id > 0 && id < all_files_size) {
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(all_files[id].magic != 0x0BADCAFE) {
- netdata_log_error("Ignoring request to remove empty file id %d.", id);
- return;
- }
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- debug_log("decreasing slot %d (count = %d).", id, all_files[id].count);
-
- if(all_files[id].count > 0) {
- all_files[id].count--;
-
- if(!all_files[id].count) {
- debug_log(" >> slot %d is empty.", id);
-
- if(unlikely(file_descriptor_remove(&all_files[id]) != (void *)&all_files[id]))
- netdata_log_error("INTERNAL ERROR: removal of unused fd from index, removed a different fd");
-
-#ifdef NETDATA_INTERNAL_CHECKS
- all_files[id].magic = 0x00000000;
-#endif /* NETDATA_INTERNAL_CHECKS */
- all_files_len--;
- }
- }
- else
- netdata_log_error("Request to decrease counter of fd %d (%s), while the use counter is 0",
- id,
- all_files[id].name);
- }
- else
- netdata_log_error("Request to decrease counter of fd %d, which is outside the array size (1 to %d)",
- id,
- all_files_size);
-}
-
-static inline void all_files_grow() {
- void *old = all_files;
- int i;
-
- // there is no empty slot
- debug_log("extending fd array to %d entries", all_files_size + FILE_DESCRIPTORS_INCREASE_STEP);
-
- all_files = reallocz(all_files, (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP) * sizeof(struct file_descriptor));
-
- // if the address changed, we have to rebuild the index
- // since all pointers are now invalid
-
- if(unlikely(old && old != (void *)all_files)) {
- debug_log(" >> re-indexing.");
-
- all_files_index.root = NULL;
- for(i = 0; i < all_files_size; i++) {
- if(!all_files[i].count) continue;
- if(unlikely(file_descriptor_add(&all_files[i]) != (void *)&all_files[i]))
- netdata_log_error("INTERNAL ERROR: duplicate indexing of fd during realloc.");
- }
-
- debug_log(" >> re-indexing done.");
- }
-
- // initialize the newly added entries
-
- for(i = all_files_size; i < (all_files_size + FILE_DESCRIPTORS_INCREASE_STEP); i++) {
- all_files[i].count = 0;
- all_files[i].name = NULL;
-#ifdef NETDATA_INTERNAL_CHECKS
- all_files[i].magic = 0x00000000;
-#endif /* NETDATA_INTERNAL_CHECKS */
- all_files[i].pos = i;
- }
-
- if(unlikely(!all_files_size)) all_files_len = 1;
- all_files_size += FILE_DESCRIPTORS_INCREASE_STEP;
-}
-
-static inline int file_descriptor_set_on_empty_slot(const char *name, uint32_t hash, FD_FILETYPE type) {
- // check we have enough memory to add it
- if(!all_files || all_files_len == all_files_size)
- all_files_grow();
-
- debug_log(" >> searching for empty slot.");
-
- // search for an empty slot
-
- static int last_pos = 0;
- int i, c;
- for(i = 0, c = last_pos ; i < all_files_size ; i++, c++) {
- if(c >= all_files_size) c = 0;
- if(c == 0) continue;
-
- if(!all_files[c].count) {
- debug_log(" >> Examining slot %d.", c);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- if(all_files[c].magic == 0x0BADCAFE && all_files[c].name && file_descriptor_find(all_files[c].name, all_files[c].hash))
- netdata_log_error("fd on position %d is not cleared properly. It still has %s in it.", c, all_files[c].name);
-#endif /* NETDATA_INTERNAL_CHECKS */
-
- debug_log(" >> %s fd position %d for %s (last name: %s)", all_files[c].name?"re-using":"using", c, name, all_files[c].name);
-
- freez((void *)all_files[c].name);
- all_files[c].name = NULL;
- last_pos = c;
- break;
- }
- }
-
- all_files_len++;
-
- if(i == all_files_size) {
- fatal("We should find an empty slot, but there isn't any");
- exit(1);
- }
- // else we have an empty slot in 'c'
-
- debug_log(" >> updating slot %d.", c);
-
- all_files[c].name = strdupz(name);
- all_files[c].hash = hash;
- all_files[c].type = type;
- all_files[c].pos = c;
- all_files[c].count = 1;
-#ifdef NETDATA_INTERNAL_CHECKS
- all_files[c].magic = 0x0BADCAFE;
-#endif /* NETDATA_INTERNAL_CHECKS */
- if(unlikely(file_descriptor_add(&all_files[c]) != (void *)&all_files[c]))
- netdata_log_error("INTERNAL ERROR: duplicate indexing of fd.");
-
- debug_log("using fd position %d (name: %s)", c, all_files[c].name);
-
- return c;
-}
-
-static inline int file_descriptor_find_or_add(const char *name, uint32_t hash) {
- if(unlikely(!hash))
- hash = simple_hash(name);
-
- debug_log("adding or finding name '%s' with hash %u", name, hash);
-
- struct file_descriptor *fd = file_descriptor_find(name, hash);
- if(fd) {
- // found
- debug_log(" >> found on slot %d", fd->pos);
-
- fd->count++;
- return fd->pos;
- }
- // not found
-
- FD_FILETYPE type;
- if(likely(name[0] == '/')) type = FILETYPE_FILE;
- else if(likely(strncmp(name, "pipe:", 5) == 0)) type = FILETYPE_PIPE;
- else if(likely(strncmp(name, "socket:", 7) == 0)) type = FILETYPE_SOCKET;
- else if(likely(strncmp(name, "anon_inode:", 11) == 0)) {
- const char *t = &name[11];
-
- if(strcmp(t, "inotify") == 0) type = FILETYPE_INOTIFY;
- else if(strcmp(t, "[eventfd]") == 0) type = FILETYPE_EVENTFD;
- else if(strcmp(t, "[eventpoll]") == 0) type = FILETYPE_EVENTPOLL;
- else if(strcmp(t, "[timerfd]") == 0) type = FILETYPE_TIMERFD;
- else if(strcmp(t, "[signalfd]") == 0) type = FILETYPE_SIGNALFD;
- else {
- debug_log("UNKNOWN anonymous inode: %s", name);
- type = FILETYPE_OTHER;
- }
- }
- else if(likely(strcmp(name, "inotify") == 0)) type = FILETYPE_INOTIFY;
- else {
- debug_log("UNKNOWN linkname: %s", name);
- type = FILETYPE_OTHER;
- }
-
- return file_descriptor_set_on_empty_slot(name, hash, type);
-}
-
-void clear_pid_fd(struct pid_fd *pfd) {
- pfd->fd = 0;
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- pfd->link_hash = 0;
- pfd->inode = 0;
- pfd->cache_iterations_counter = 0;
- pfd->cache_iterations_reset = 0;
-#endif
-}
-
-static inline void make_all_pid_fds_negative(struct pid_stat *p) {
- struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
- while(pfd < pfdend) {
- pfd->fd = -(pfd->fd);
- pfd++;
- }
-}
-
-static inline void cleanup_negative_pid_fds(struct pid_stat *p) {
- struct pid_fd *pfd = p->fds, *pfdend = &p->fds[p->fds_size];
-
- while(pfd < pfdend) {
- int fd = pfd->fd;
-
- if(unlikely(fd < 0)) {
- file_descriptor_not_used(-(fd));
- clear_pid_fd(pfd);
- }
-
- pfd++;
- }
-}
-
-void init_pid_fds(struct pid_stat *p, size_t first, size_t size) {
- struct pid_fd *pfd = &p->fds[first], *pfdend = &p->fds[first + size];
-
- while(pfd < pfdend) {
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- pfd->filename = NULL;
-#endif
- clear_pid_fd(pfd);
- pfd++;
- }
-}
-
-#ifdef __APPLE__
-static bool read_pid_file_descriptors_per_os(struct pid_stat *p, void *ptr __maybe_unused) {
- static struct proc_fdinfo *fds = NULL;
- static int fdsCapacity = 0;
-
- int bufferSize = proc_pidinfo(p->pid, PROC_PIDLISTFDS, 0, NULL, 0);
- if (bufferSize <= 0) {
- netdata_log_error("Failed to get the size of file descriptors for PID %d", p->pid);
- return false;
- }
-
- // Resize buffer if necessary
- if (bufferSize > fdsCapacity) {
- if(fds)
- freez(fds);
-
- fds = mallocz(bufferSize);
- fdsCapacity = bufferSize;
- }
-
- int num_fds = proc_pidinfo(p->pid, PROC_PIDLISTFDS, 0, fds, bufferSize) / PROC_PIDLISTFD_SIZE;
- if (num_fds <= 0) {
- netdata_log_error("Failed to get the file descriptors for PID %d", p->pid);
- return false;
- }
-
- for (int i = 0; i < num_fds; i++) {
- switch (fds[i].proc_fdtype) {
- case PROX_FDTYPE_VNODE: {
- struct vnode_fdinfowithpath vi;
- if (proc_pidfdinfo(p->pid, fds[i].proc_fd, PROC_PIDFDVNODEPATHINFO, &vi, sizeof(vi)) > 0)
- p->openfds.files++;
- else
- p->openfds.other++;
-
- break;
- }
- case PROX_FDTYPE_SOCKET: {
- p->openfds.sockets++;
- break;
- }
- case PROX_FDTYPE_PIPE: {
- p->openfds.pipes++;
- break;
- }
-
- default:
- p->openfds.other++;
- break;
- }
- }
-
- return true;
-}
-#endif // __APPLE__
-
-#if defined(__FreeBSD__)
-static bool read_pid_file_descriptors_per_os(struct pid_stat *p, void *ptr) {
- int mib[4];
- size_t size;
- struct kinfo_file *fds;
- static char *fdsbuf;
- char *bfdsbuf, *efdsbuf;
- char fdsname[FILENAME_MAX + 1];
-#define SHM_FORMAT_LEN 31 // format: 21 + size: 10
- char shm_name[FILENAME_MAX - SHM_FORMAT_LEN + 1];
-
- // we make all pid fds negative, so that
- // we can detect unused file descriptors
- // at the end, to free them
- make_all_pid_fds_negative(p);
-
- mib[0] = CTL_KERN;
- mib[1] = KERN_PROC;
- mib[2] = KERN_PROC_FILEDESC;
- mib[3] = p->pid;
-
- if (unlikely(sysctl(mib, 4, NULL, &size, NULL, 0))) {
- netdata_log_error("sysctl error: Can't get file descriptors data size for pid %d", p->pid);
- return false;
- }
- if (likely(size > 0))
- fdsbuf = reallocz(fdsbuf, size);
- if (unlikely(sysctl(mib, 4, fdsbuf, &size, NULL, 0))) {
- netdata_log_error("sysctl error: Can't get file descriptors data for pid %d", p->pid);
- return false;
- }
-
- bfdsbuf = fdsbuf;
- efdsbuf = fdsbuf + size;
- while (bfdsbuf < efdsbuf) {
- fds = (struct kinfo_file *)(uintptr_t)bfdsbuf;
- if (unlikely(fds->kf_structsize == 0))
- break;
-
- // do not process file descriptors for current working directory, root directory,
- // jail directory, ktrace vnode, text vnode and controlling terminal
- if (unlikely(fds->kf_fd < 0)) {
- bfdsbuf += fds->kf_structsize;
- continue;
- }
-
- // get file descriptors array index
- size_t fdid = fds->kf_fd;
-
- // check if the fds array is small
- if (unlikely(fdid >= p->fds_size)) {
- // it is small, extend it
-
- debug_log("extending fd memory slots for %s from %d to %d", p->comm, p->fds_size, fdid + MAX_SPARE_FDS);
-
- p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd));
-
- // and initialize it
- init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size);
- p->fds_size = fdid + MAX_SPARE_FDS;
- }
-
- if (unlikely(p->fds[fdid].fd == 0)) {
- // we don't know this fd, get it
-
- switch (fds->kf_type) {
- case KF_TYPE_FIFO:
- case KF_TYPE_VNODE:
- if (unlikely(!fds->kf_path[0])) {
- sprintf(fdsname, "other: inode: %lu", fds->kf_un.kf_file.kf_file_fileid);
- break;
- }
- sprintf(fdsname, "%s", fds->kf_path);
- break;
- case KF_TYPE_SOCKET:
- switch (fds->kf_sock_domain) {
- case AF_INET:
- case AF_INET6:
-#if __FreeBSD_version < 1400074
- if (fds->kf_sock_protocol == IPPROTO_TCP)
- sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_inpcb);
- else
-#endif
- sprintf(fdsname, "socket: %d %lx", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sock_pcb);
- break;
- case AF_UNIX:
- /* print address of pcb and connected pcb */
- sprintf(fdsname, "socket: %lx %lx", fds->kf_un.kf_sock.kf_sock_pcb, fds->kf_un.kf_sock.kf_sock_unpconn);
- break;
- default:
- /* print protocol number and socket address */
-#if __FreeBSD_version < 1200031
- sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_sa_local.__ss_pad1, fds->kf_sa_local.__ss_pad2);
-#else
- sprintf(fdsname, "socket: other: %d %s %s", fds->kf_sock_protocol, fds->kf_un.kf_sock.kf_sa_local.__ss_pad1, fds->kf_un.kf_sock.kf_sa_local.__ss_pad2);
-#endif
- }
- break;
- case KF_TYPE_PIPE:
- sprintf(fdsname, "pipe: %lu %lu", fds->kf_un.kf_pipe.kf_pipe_addr, fds->kf_un.kf_pipe.kf_pipe_peer);
- break;
- case KF_TYPE_PTS:
-#if __FreeBSD_version < 1200031
- sprintf(fdsname, "other: pts: %u", fds->kf_un.kf_pts.kf_pts_dev);
-#else
- sprintf(fdsname, "other: pts: %lu", fds->kf_un.kf_pts.kf_pts_dev);
-#endif
- break;
- case KF_TYPE_SHM:
- strncpyz(shm_name, fds->kf_path, FILENAME_MAX - SHM_FORMAT_LEN);
- sprintf(fdsname, "other: shm: %s size: %lu", shm_name, fds->kf_un.kf_file.kf_file_size);
- break;
- case KF_TYPE_SEM:
- sprintf(fdsname, "other: sem: %u", fds->kf_un.kf_sem.kf_sem_value);
- break;
- default:
- sprintf(fdsname, "other: pid: %d fd: %d", fds->kf_un.kf_proc.kf_pid, fds->kf_fd);
- }
-
- // if another process already has this, we will get
- // the same id
- p->fds[fdid].fd = file_descriptor_find_or_add(fdsname, 0);
- }
-
- // else make it positive again, we need it
- // of course, the actual file may have changed
-
- else
- p->fds[fdid].fd = -p->fds[fdid].fd;
-
- bfdsbuf += fds->kf_structsize;
- }
-
- return true;
-}
-#endif // __FreeBSD__
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-static bool read_pid_file_descriptors_per_os(struct pid_stat *p, void *ptr __maybe_unused) {
- if(unlikely(!p->fds_dirname)) {
- char dirname[FILENAME_MAX+1];
- snprintfz(dirname, FILENAME_MAX, "%s/proc/%d/fd", netdata_configured_host_prefix, p->pid);
- p->fds_dirname = strdupz(dirname);
- }
-
- DIR *fds = opendir(p->fds_dirname);
- if(unlikely(!fds)) return false;
-
- struct dirent *de;
- char linkname[FILENAME_MAX + 1];
-
- // we make all pid fds negative, so that
- // we can detect unused file descriptors
- // at the end, to free them
- make_all_pid_fds_negative(p);
-
- while((de = readdir(fds))) {
- // we need only files with numeric names
-
- if(unlikely(de->d_name[0] < '0' || de->d_name[0] > '9'))
- continue;
-
- // get its number
- int fdid = (int) str2l(de->d_name);
- if(unlikely(fdid < 0)) continue;
-
- // check if the fds array is small
- if(unlikely((size_t)fdid >= p->fds_size)) {
- // it is small, extend it
-
- debug_log("extending fd memory slots for %s from %d to %d"
- , p->comm
- , p->fds_size
- , fdid + MAX_SPARE_FDS
- );
-
- p->fds = reallocz(p->fds, (fdid + MAX_SPARE_FDS) * sizeof(struct pid_fd));
-
- // and initialize it
- init_pid_fds(p, p->fds_size, (fdid + MAX_SPARE_FDS) - p->fds_size);
- p->fds_size = (size_t)fdid + MAX_SPARE_FDS;
- }
-
- if(unlikely(p->fds[fdid].fd < 0 && de->d_ino != p->fds[fdid].inode)) {
- // inodes do not match, clear the previous entry
- inodes_changed_counter++;
- file_descriptor_not_used(-p->fds[fdid].fd);
- clear_pid_fd(&p->fds[fdid]);
- }
-
- if(p->fds[fdid].fd < 0 && p->fds[fdid].cache_iterations_counter > 0) {
- p->fds[fdid].fd = -p->fds[fdid].fd;
- p->fds[fdid].cache_iterations_counter--;
- continue;
- }
-
- if(unlikely(!p->fds[fdid].filename)) {
- filenames_allocated_counter++;
- char fdname[FILENAME_MAX + 1];
- snprintfz(fdname, FILENAME_MAX, "%s/proc/%d/fd/%s", netdata_configured_host_prefix, p->pid, de->d_name);
- p->fds[fdid].filename = strdupz(fdname);
- }
-
- file_counter++;
- ssize_t l = readlink(p->fds[fdid].filename, linkname, FILENAME_MAX);
- if(unlikely(l == -1)) {
- // cannot read the link
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- netdata_log_error("Cannot read link %s", p->fds[fdid].filename);
-
- if(unlikely(p->fds[fdid].fd < 0)) {
- file_descriptor_not_used(-p->fds[fdid].fd);
- clear_pid_fd(&p->fds[fdid]);
- }
-
- continue;
- }
- else
- linkname[l] = '\0';
-
- uint32_t link_hash = simple_hash(linkname);
-
- if(unlikely(p->fds[fdid].fd < 0 && p->fds[fdid].link_hash != link_hash)) {
- // the link changed
- links_changed_counter++;
- file_descriptor_not_used(-p->fds[fdid].fd);
- clear_pid_fd(&p->fds[fdid]);
- }
-
- if(unlikely(p->fds[fdid].fd == 0)) {
- // we don't know this fd, get it
-
- // if another process already has this, we will get
- // the same id
- p->fds[fdid].fd = file_descriptor_find_or_add(linkname, link_hash);
- p->fds[fdid].inode = de->d_ino;
- p->fds[fdid].link_hash = link_hash;
- }
- else {
- // else make it positive again, we need it
- p->fds[fdid].fd = -p->fds[fdid].fd;
- }
-
- // caching control
- // without this we read all the files on every iteration
- if(max_fds_cache_seconds > 0) {
- size_t spread = ((size_t)max_fds_cache_seconds > 10) ? 10 : (size_t)max_fds_cache_seconds;
-
- // cache it for a few iterations
- size_t max = ((size_t) max_fds_cache_seconds + (fdid % spread)) / (size_t) update_every;
- p->fds[fdid].cache_iterations_reset++;
-
- if(unlikely(p->fds[fdid].cache_iterations_reset % spread == (size_t) fdid % spread))
- p->fds[fdid].cache_iterations_reset++;
-
- if(unlikely((fdid <= 2 && p->fds[fdid].cache_iterations_reset > 5) ||
- p->fds[fdid].cache_iterations_reset > max)) {
-                    // for stdin, stdout, stderr (fdid <= 2) we have already checked a few times, or the counter went above the max, so cap it at max
- p->fds[fdid].cache_iterations_reset = max;
- }
-
- p->fds[fdid].cache_iterations_counter = p->fds[fdid].cache_iterations_reset;
- }
- }
-
- closedir(fds);
-
- return true;
-}
-#endif // !__FreeBSD__ !__APPLE
-
-int read_pid_file_descriptors(struct pid_stat *p, void *ptr) {
- bool ret = read_pid_file_descriptors_per_os(p, ptr);
- cleanup_negative_pid_fds(p);
-
- return ret ? 1 : 0;
-}
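
For reference, the following is a minimal standalone sketch of the readlink()-based classification that the deleted fd reader applies to every /proc/<pid>/fd entry. It assumes Linux and plain libc; the helper name count_fds() and its counters are illustrative, not part of the plugin.

/* Sketch: classify one pid's open fds by reading /proc/<pid>/fd (Linux only). */
#include <dirent.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

static void count_fds(pid_t pid) {
    char dirname[64], path[320], link[4096];
    size_t files = 0, pipes = 0, sockets = 0, other = 0;

    snprintf(dirname, sizeof(dirname), "/proc/%d/fd", (int)pid);
    DIR *dir = opendir(dirname);
    if (!dir) return;

    struct dirent *de;
    while ((de = readdir(dir))) {
        if (de->d_name[0] < '0' || de->d_name[0] > '9') continue;   // numeric entries only
        snprintf(path, sizeof(path), "%s/%s", dirname, de->d_name);
        ssize_t l = readlink(path, link, sizeof(link) - 1);
        if (l < 0) continue;                                        // the fd may be gone already
        link[l] = '\0';

        if (link[0] == '/')                    files++;             // regular file or device
        else if (!strncmp(link, "pipe:", 5))   pipes++;
        else if (!strncmp(link, "socket:", 7)) sockets++;
        else                                   other++;             // anon_inode:..., etc.
    }
    closedir(dir);

    printf("pid %d: files=%zu pipes=%zu sockets=%zu other=%zu\n",
           (int)pid, files, pipes, sockets, other);
}

Calling count_fds(getpid()) prints the same per-type breakdown the plugin accumulates into its openfds counters.
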
diff --git a/src/collectors/apps.plugin/apps_proc_pid_io.c b/src/collectors/apps.plugin/apps_proc_pid_io.c
deleted file mode 100644
index 0fef3fc24..000000000
--- a/src/collectors/apps.plugin/apps_proc_pid_io.c
+++ /dev/null
@@ -1,95 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-static inline void clear_pid_io(struct pid_stat *p) {
- p->io_logical_bytes_read = 0;
- p->io_logical_bytes_written = 0;
- p->io_read_calls = 0;
- p->io_write_calls = 0;
- p->io_storage_bytes_read = 0;
- p->io_storage_bytes_written = 0;
- p->io_cancelled_write_bytes = 0;
-}
-
-#if defined(__FreeBSD__)
-static inline bool read_proc_pid_io_per_os(struct pid_stat *p, void *ptr) {
- struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
-
- pid_incremental_rate(io, p->io_storage_bytes_read, proc_info->ki_rusage.ru_inblock);
- pid_incremental_rate(io, p->io_storage_bytes_written, proc_info->ki_rusage.ru_oublock);
-
- p->io_logical_bytes_read = 0;
- p->io_logical_bytes_written = 0;
- p->io_read_calls = 0;
- p->io_write_calls = 0;
- p->io_cancelled_write_bytes = 0;
-
- return true;
-}
-#endif
-
-#ifdef __APPLE__
-static inline bool read_proc_pid_io_per_os(struct pid_stat *p, void *ptr) {
- struct pid_info *pi = ptr;
-
-    // On macOS, proc_pid_rusage() provides disk I/O statistics that include the bytes read and written,
-    // but not the same level of detail as Linux (e.g. it does not separate logical from physical I/O bytes).
- pid_incremental_rate(io, p->io_storage_bytes_read, pi->rusageinfo.ri_diskio_bytesread);
- pid_incremental_rate(io, p->io_storage_bytes_written, pi->rusageinfo.ri_diskio_byteswritten);
-
- p->io_logical_bytes_read = 0;
- p->io_logical_bytes_written = 0;
- p->io_read_calls = 0;
- p->io_write_calls = 0;
- p->io_cancelled_write_bytes = 0;
-
- return true;
-}
-#endif // __APPLE__
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-static inline int read_proc_pid_io_per_os(struct pid_stat *p, void *ptr __maybe_unused) {
- static procfile *ff = NULL;
-
- if(unlikely(!p->io_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/io", netdata_configured_host_prefix, p->pid);
- p->io_filename = strdupz(filename);
- }
-
- // open the file
- ff = procfile_reopen(ff, p->io_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(unlikely(!ff)) goto cleanup;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) goto cleanup;
-
- pid_incremental_rate(io, p->io_logical_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 0, 1)));
- pid_incremental_rate(io, p->io_logical_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 1, 1)));
- pid_incremental_rate(io, p->io_read_calls, str2kernel_uint_t(procfile_lineword(ff, 2, 1)));
- pid_incremental_rate(io, p->io_write_calls, str2kernel_uint_t(procfile_lineword(ff, 3, 1)));
- pid_incremental_rate(io, p->io_storage_bytes_read, str2kernel_uint_t(procfile_lineword(ff, 4, 1)));
- pid_incremental_rate(io, p->io_storage_bytes_written, str2kernel_uint_t(procfile_lineword(ff, 5, 1)));
- pid_incremental_rate(io, p->io_cancelled_write_bytes, str2kernel_uint_t(procfile_lineword(ff, 6, 1)));
-
- return true;
-
-cleanup:
- clear_pid_io(p);
- return false;
-}
-#endif // !__FreeBSD__ !__APPLE__
-
-int read_proc_pid_io(struct pid_stat *p, void *ptr) {
- p->last_io_collected_usec = p->io_collected_usec;
- p->io_collected_usec = now_monotonic_usec();
- calls_counter++;
-
- bool ret = read_proc_pid_io_per_os(p, ptr);
-
- if(unlikely(global_iterations_counter == 1))
- clear_pid_io(p);
-
- return ret ? 1 : 0;
-}
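
For reference, a minimal sketch of the incremental-rate idea behind pid_incremental_rate(): sample a monotonically increasing counter from /proc/<pid>/io twice and turn the delta into a per-second rate. The field name ("read_bytes"), helper name and one-second interval are illustrative; the plugin itself uses the interval between its own collection cycles.

/* Sketch: turn a /proc/<pid>/io counter into a per-second rate (Linux only). */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

static unsigned long long read_io_counter(pid_t pid, const char *key) {
    char path[64], line[256];
    unsigned long long value = 0;
    size_t key_len = strlen(key);

    snprintf(path, sizeof(path), "/proc/%d/io", (int)pid);
    FILE *fp = fopen(path, "r");
    if (!fp) return 0;

    while (fgets(line, sizeof(line), fp)) {
        // lines look like "read_bytes: 123456"
        if (!strncmp(line, key, key_len) && line[key_len] == ':')
            sscanf(line + key_len + 1, "%llu", &value);
    }
    fclose(fp);
    return value;
}

int main(void) {
    unsigned long long before = read_io_counter(getpid(), "read_bytes");
    sleep(1);
    unsigned long long after = read_io_counter(getpid(), "read_bytes");
    printf("read_bytes rate: %llu bytes/s\n", after - before);   // delta over a 1s window
    return 0;
}
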
diff --git a/src/collectors/apps.plugin/apps_proc_pid_limits.c b/src/collectors/apps.plugin/apps_proc_pid_limits.c
deleted file mode 100644
index 7485086ba..000000000
--- a/src/collectors/apps.plugin/apps_proc_pid_limits.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-// ----------------------------------------------------------------------------
-
-#define MAX_PROC_PID_LIMITS 8192
-#define PROC_PID_LIMITS_MAX_OPEN_FILES_KEY "\nMax open files "
-
-static inline kernel_uint_t get_proc_pid_limits_limit(char *buf, const char *key, size_t key_len, kernel_uint_t def) {
- char *line = strstr(buf, key);
- if(!line)
- return def;
-
- char *v = &line[key_len];
- while(isspace(*v)) v++;
-
- if(strcmp(v, "unlimited") == 0)
- return 0;
-
- return str2ull(v, NULL);
-}
-
-#if defined(__FreeBSD__) || defined(__APPLE__)
-int read_proc_pid_limits_per_os(struct pid_stat *p, void *ptr __maybe_unused) {
- return false;
-}
-#endif
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-static inline bool read_proc_pid_limits_per_os(struct pid_stat *p, void *ptr __maybe_unused) {
- static char proc_pid_limits_buffer[MAX_PROC_PID_LIMITS + 1];
- bool ret = false;
- bool read_limits = false;
-
- errno_clear();
- proc_pid_limits_buffer[0] = '\0';
-
- kernel_uint_t all_fds = pid_openfds_sum(p);
- if(all_fds < p->limits.max_open_files / 2 && p->io_collected_usec > p->last_limits_collected_usec && p->io_collected_usec - p->last_limits_collected_usec <= 60 * USEC_PER_SEC) {
- // too frequent, we want to collect limits once per minute
- ret = true;
- goto cleanup;
- }
-
- if(unlikely(!p->limits_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/limits", netdata_configured_host_prefix, p->pid);
- p->limits_filename = strdupz(filename);
- }
-
- int fd = open(p->limits_filename, procfile_open_flags, 0666);
- if(unlikely(fd == -1)) goto cleanup;
-
- ssize_t bytes = read(fd, proc_pid_limits_buffer, MAX_PROC_PID_LIMITS);
- close(fd);
-
- if(bytes <= 0)
- goto cleanup;
-
- // make it '\0' terminated
- if(bytes < MAX_PROC_PID_LIMITS)
- proc_pid_limits_buffer[bytes] = '\0';
- else
- proc_pid_limits_buffer[MAX_PROC_PID_LIMITS - 1] = '\0';
-
- p->limits.max_open_files = get_proc_pid_limits_limit(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY, sizeof(PROC_PID_LIMITS_MAX_OPEN_FILES_KEY) - 1, 0);
- if(p->limits.max_open_files == 1) {
-        // this looks like a kernel bug or something similar:
-        // max open files is reported as 1, while the number of files
-        // the process has open is more than 1...
- // https://github.com/netdata/netdata/issues/15443
- p->limits.max_open_files = 0;
- ret = true;
- goto cleanup;
- }
-
- p->last_limits_collected_usec = p->io_collected_usec;
- read_limits = true;
-
- ret = true;
-
-cleanup:
- if(p->limits.max_open_files)
- p->openfds_limits_percent = (NETDATA_DOUBLE)all_fds * 100.0 / (NETDATA_DOUBLE)p->limits.max_open_files;
- else
- p->openfds_limits_percent = 0.0;
-
- if(p->openfds_limits_percent > 100.0) {
- if(!(p->log_thrown & PID_LOG_LIMITS_DETAIL)) {
- char *line;
-
- if(!read_limits) {
- proc_pid_limits_buffer[0] = '\0';
- line = "NOT READ";
- }
- else {
- line = strstr(proc_pid_limits_buffer, PROC_PID_LIMITS_MAX_OPEN_FILES_KEY);
- if (line) {
- line++; // skip the initial newline
-
- char *end = strchr(line, '\n');
- if (end)
- *end = '\0';
- }
- }
-
- netdata_log_info(
- "FDS_LIMITS: PID %d (%s) is using "
- "%0.2f %% of its fds limits, "
- "open fds = %"PRIu64 "("
- "files = %"PRIu64 ", "
- "pipes = %"PRIu64 ", "
- "sockets = %"PRIu64", "
- "inotifies = %"PRIu64", "
- "eventfds = %"PRIu64", "
- "timerfds = %"PRIu64", "
- "signalfds = %"PRIu64", "
- "eventpolls = %"PRIu64" "
- "other = %"PRIu64" "
- "), open fds limit = %"PRIu64", "
- "%s, "
- "original line [%s]",
- p->pid, p->comm, p->openfds_limits_percent, all_fds,
- p->openfds.files,
- p->openfds.pipes,
- p->openfds.sockets,
- p->openfds.inotifies,
- p->openfds.eventfds,
- p->openfds.timerfds,
- p->openfds.signalfds,
- p->openfds.eventpolls,
- p->openfds.other,
- p->limits.max_open_files,
- read_limits ? "and we have read the limits AFTER counting the fds"
- : "but we have read the limits BEFORE counting the fds",
- line);
-
- p->log_thrown |= PID_LOG_LIMITS_DETAIL;
- }
- }
- else
- p->log_thrown &= ~PID_LOG_LIMITS_DETAIL;
-
- return ret;
-}
-#endif // !__FreeBSD__ !__APPLE__
-
-int read_proc_pid_limits(struct pid_stat *p, void *ptr) {
- return read_proc_pid_limits_per_os(p, ptr) ? 1 : 0;
-}
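
For reference, a minimal sketch of extracting the soft "Max open files" limit from /proc/<pid>/limits, the same text search the deleted reader performs; the helper name and buffer size are illustrative, and, as above, "unlimited" is mapped to 0. The utilization the plugin reports is then simply open fds * 100.0 / limit, with 0 treated as no limit.

/* Sketch: read the soft "Max open files" limit of a pid (Linux only). */
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

static unsigned long long max_open_files(pid_t pid) {
    char path[64], buf[8192];

    snprintf(path, sizeof(path), "/proc/%d/limits", (int)pid);
    FILE *fp = fopen(path, "r");
    if (!fp) return 0;
    size_t n = fread(buf, 1, sizeof(buf) - 1, fp);
    fclose(fp);
    buf[n] = '\0';

    const char *key = "\nMax open files";
    char *line = strstr(buf, key);
    if (!line) return 0;

    char *v = line + strlen(key);
    while (isspace((unsigned char)*v)) v++;             // skip column padding
    if (!strncmp(v, "unlimited", 9)) return 0;          // 0 means "no limit" here
    return strtoull(v, NULL, 10);                       // first number = soft limit
}
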
diff --git a/src/collectors/apps.plugin/apps_proc_pid_stat.c b/src/collectors/apps.plugin/apps_proc_pid_stat.c
deleted file mode 100644
index 8767f7831..000000000
--- a/src/collectors/apps.plugin/apps_proc_pid_stat.c
+++ /dev/null
@@ -1,293 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-// ----------------------------------------------------------------------------
-
-static inline void assign_target_to_pid(struct pid_stat *p) {
- targets_assignment_counter++;
-
- uint32_t hash = simple_hash(p->comm);
- size_t pclen = strlen(p->comm);
-
- struct target *w;
- for(w = apps_groups_root_target; w ; w = w->next) {
- // if(debug_enabled || (p->target && p->target->debug_enabled)) debug_log_int("\t\tcomparing '%s' with '%s'", w->compare, p->comm);
-
- // find it - 4 cases:
- // 1. the target is not a pattern
- // 2. the target has the prefix
- // 3. the target has the suffix
- // 4. the target is something inside cmdline
-
- if(unlikely(( (!w->starts_with && !w->ends_with && w->comparehash == hash && !strcmp(w->compare, p->comm))
- || (w->starts_with && !w->ends_with && !strncmp(w->compare, p->comm, w->comparelen))
- || (!w->starts_with && w->ends_with && pclen >= w->comparelen && !strcmp(w->compare, &p->comm[pclen - w->comparelen]))
- || (proc_pid_cmdline_is_needed && w->starts_with && w->ends_with && p->cmdline && strstr(p->cmdline, w->compare))
- ))) {
-
- p->matched_by_config = true;
- if(w->target) p->target = w->target;
- else p->target = w;
-
- if(debug_enabled || (p->target && p->target->debug_enabled))
- debug_log_int("%s linked to target %s", p->comm, p->target->name);
-
- break;
- }
- }
-}
-
-static inline void update_pid_comm(struct pid_stat *p, const char *comm) {
- if(strcmp(p->comm, comm) != 0) {
- if(unlikely(debug_enabled)) {
- if(p->comm[0])
- debug_log("\tpid %d (%s) changed name to '%s'", p->pid, p->comm, comm);
- else
- debug_log("\tJust added %d (%s)", p->pid, comm);
- }
-
- strncpyz(p->comm, comm, MAX_COMPARE_NAME);
-
- // /proc/<pid>/cmdline
- if(likely(proc_pid_cmdline_is_needed))
- managed_log(p, PID_LOG_CMDLINE, read_proc_pid_cmdline(p));
-
- assign_target_to_pid(p);
- }
-}
-
-static inline void clear_pid_stat(struct pid_stat *p, bool threads) {
- p->minflt = 0;
- p->cminflt = 0;
- p->majflt = 0;
- p->cmajflt = 0;
- p->utime = 0;
- p->stime = 0;
- p->gtime = 0;
- p->cutime = 0;
- p->cstime = 0;
- p->cgtime = 0;
-
- if(threads)
- p->num_threads = 0;
-
- // p->rss = 0;
-}
-
-#if defined(__FreeBSD__)
-static inline bool read_proc_pid_stat_per_os(struct pid_stat *p, void *ptr) {
- struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
- if (unlikely(proc_info->ki_tdflags & TDF_IDLETD))
- goto cleanup;
-
- char *comm = proc_info->ki_comm;
- p->ppid = proc_info->ki_ppid;
-
- update_pid_comm(p, comm);
-
- pid_incremental_rate(stat, p->minflt, (kernel_uint_t)proc_info->ki_rusage.ru_minflt);
- pid_incremental_rate(stat, p->cminflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_minflt);
- pid_incremental_rate(stat, p->majflt, (kernel_uint_t)proc_info->ki_rusage.ru_majflt);
- pid_incremental_rate(stat, p->cmajflt, (kernel_uint_t)proc_info->ki_rusage_ch.ru_majflt);
- pid_incremental_rate(stat, p->utime, (kernel_uint_t)proc_info->ki_rusage.ru_utime.tv_sec * 100 + proc_info->ki_rusage.ru_utime.tv_usec / 10000);
- pid_incremental_rate(stat, p->stime, (kernel_uint_t)proc_info->ki_rusage.ru_stime.tv_sec * 100 + proc_info->ki_rusage.ru_stime.tv_usec / 10000);
- pid_incremental_rate(stat, p->cutime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_utime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_utime.tv_usec / 10000);
- pid_incremental_rate(stat, p->cstime, (kernel_uint_t)proc_info->ki_rusage_ch.ru_stime.tv_sec * 100 + proc_info->ki_rusage_ch.ru_stime.tv_usec / 10000);
-
- p->num_threads = proc_info->ki_numthreads;
-
- usec_t started_ut = timeval_usec(&proc_info->ki_start);
- p->uptime = (system_current_time_ut > started_ut) ? (system_current_time_ut - started_ut) / USEC_PER_SEC : 0;
-
- if(enable_guest_charts) {
- enable_guest_charts = false;
- netdata_log_info("Guest charts aren't supported by FreeBSD");
- }
-
- if(unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
- debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d", netdata_configured_host_prefix, p->pid, p->comm, (p->target)?p->target->name:"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, p->utime, p->stime, p->cutime, p->cstime, p->minflt, p->majflt, p->cminflt, p->cmajflt, p->num_threads);
-
- if(unlikely(global_iterations_counter == 1))
- clear_pid_stat(p, false);
-
- return true;
-
-cleanup:
- clear_pid_stat(p, true);
- return false;
-}
-#endif // __FreeBSD__
-
-#ifdef __APPLE__
-static inline bool read_proc_pid_stat_per_os(struct pid_stat *p, void *ptr) {
- struct pid_info *pi = ptr;
-
- p->ppid = pi->proc.kp_eproc.e_ppid;
-
- // Update command name and target if changed
- char comm[PROC_PIDPATHINFO_MAXSIZE];
- int ret = proc_name(p->pid, comm, sizeof(comm));
- if (ret <= 0)
- strncpyz(comm, "unknown", sizeof(comm) - 1);
-
- update_pid_comm(p, comm);
-
- kernel_uint_t userCPU = (pi->taskinfo.pti_total_user * mach_info.numer) / mach_info.denom / NSEC_PER_USEC / 10000;
- kernel_uint_t systemCPU = (pi->taskinfo.pti_total_system * mach_info.numer) / mach_info.denom / NSEC_PER_USEC / 10000;
-
- // Map the values from taskinfo to the pid_stat structure
- pid_incremental_rate(stat, p->minflt, pi->taskinfo.pti_faults);
- pid_incremental_rate(stat, p->majflt, pi->taskinfo.pti_pageins);
- pid_incremental_rate(stat, p->utime, userCPU);
- pid_incremental_rate(stat, p->stime, systemCPU);
- p->num_threads = pi->taskinfo.pti_threadnum;
-
- usec_t started_ut = timeval_usec(&pi->proc.kp_proc.p_starttime);
- p->uptime = (system_current_time_ut > started_ut) ? (system_current_time_ut - started_ut) / USEC_PER_SEC : 0;
-
-    // Note: some values, such as guest time, cutime and cstime, are not directly available on macOS;
-    // they are left unset here (at best they could only be approximated).
-
- if(unlikely(debug_enabled || (p->target && p->target->debug_enabled))) {
- debug_log_int("READ PROC/PID/STAT for MacOS: process: '%s' on target '%s' VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", threads=%d",
- p->comm, (p->target) ? p->target->name : "UNSET", p->utime, p->stime, p->minflt, p->majflt, p->num_threads);
- }
-
- if(unlikely(global_iterations_counter == 1))
- clear_pid_stat(p, false);
-
-    // macOS doesn't have a direct equivalent of the Linux process states,
-    // so the process state counters are not updated here.
-
- return true;
-}
-#endif // __APPLE__
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-static inline void update_proc_state_count(char proc_stt) {
- switch (proc_stt) {
- case 'S':
- proc_state_count[PROC_STATUS_SLEEPING] += 1;
- break;
- case 'R':
- proc_state_count[PROC_STATUS_RUNNING] += 1;
- break;
- case 'D':
- proc_state_count[PROC_STATUS_SLEEPING_D] += 1;
- break;
- case 'Z':
- proc_state_count[PROC_STATUS_ZOMBIE] += 1;
- break;
- case 'T':
- proc_state_count[PROC_STATUS_STOPPED] += 1;
- break;
- default:
- break;
- }
-}
-
-static inline bool read_proc_pid_stat_per_os(struct pid_stat *p, void *ptr __maybe_unused) {
- static procfile *ff = NULL;
-
- if(unlikely(!p->stat_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/stat", netdata_configured_host_prefix, p->pid);
- p->stat_filename = strdupz(filename);
- }
-
- int set_quotes = (!ff)?1:0;
-
- ff = procfile_reopen(ff, p->stat_filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(unlikely(!ff)) goto cleanup;
-
- // if(set_quotes) procfile_set_quotes(ff, "()");
- if(unlikely(set_quotes))
- procfile_set_open_close(ff, "(", ")");
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) goto cleanup;
-
- // p->pid = str2pid_t(procfile_lineword(ff, 0, 0));
- char *comm = procfile_lineword(ff, 0, 1);
- p->state = *(procfile_lineword(ff, 0, 2));
- p->ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
- // p->pgrp = (int32_t)str2pid_t(procfile_lineword(ff, 0, 4));
- // p->session = (int32_t)str2pid_t(procfile_lineword(ff, 0, 5));
- // p->tty_nr = (int32_t)str2pid_t(procfile_lineword(ff, 0, 6));
- // p->tpgid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 7));
- // p->flags = str2uint64_t(procfile_lineword(ff, 0, 8));
-
- update_pid_comm(p, comm);
-
- pid_incremental_rate(stat, p->minflt, str2kernel_uint_t(procfile_lineword(ff, 0, 9)));
- pid_incremental_rate(stat, p->cminflt, str2kernel_uint_t(procfile_lineword(ff, 0, 10)));
- pid_incremental_rate(stat, p->majflt, str2kernel_uint_t(procfile_lineword(ff, 0, 11)));
- pid_incremental_rate(stat, p->cmajflt, str2kernel_uint_t(procfile_lineword(ff, 0, 12)));
- pid_incremental_rate(stat, p->utime, str2kernel_uint_t(procfile_lineword(ff, 0, 13)));
- pid_incremental_rate(stat, p->stime, str2kernel_uint_t(procfile_lineword(ff, 0, 14)));
- pid_incremental_rate(stat, p->cutime, str2kernel_uint_t(procfile_lineword(ff, 0, 15)));
- pid_incremental_rate(stat, p->cstime, str2kernel_uint_t(procfile_lineword(ff, 0, 16)));
- // p->priority = str2kernel_uint_t(procfile_lineword(ff, 0, 17));
- // p->nice = str2kernel_uint_t(procfile_lineword(ff, 0, 18));
- p->num_threads = (int32_t) str2uint32_t(procfile_lineword(ff, 0, 19), NULL);
- // p->itrealvalue = str2kernel_uint_t(procfile_lineword(ff, 0, 20));
- kernel_uint_t collected_starttime = str2kernel_uint_t(procfile_lineword(ff, 0, 21)) / system_hz;
- p->uptime = (system_uptime_secs > collected_starttime)?(system_uptime_secs - collected_starttime):0;
- // p->vsize = str2kernel_uint_t(procfile_lineword(ff, 0, 22));
- // p->rss = str2kernel_uint_t(procfile_lineword(ff, 0, 23));
- // p->rsslim = str2kernel_uint_t(procfile_lineword(ff, 0, 24));
- // p->starcode = str2kernel_uint_t(procfile_lineword(ff, 0, 25));
- // p->endcode = str2kernel_uint_t(procfile_lineword(ff, 0, 26));
- // p->startstack = str2kernel_uint_t(procfile_lineword(ff, 0, 27));
- // p->kstkesp = str2kernel_uint_t(procfile_lineword(ff, 0, 28));
- // p->kstkeip = str2kernel_uint_t(procfile_lineword(ff, 0, 29));
- // p->signal = str2kernel_uint_t(procfile_lineword(ff, 0, 30));
- // p->blocked = str2kernel_uint_t(procfile_lineword(ff, 0, 31));
- // p->sigignore = str2kernel_uint_t(procfile_lineword(ff, 0, 32));
- // p->sigcatch = str2kernel_uint_t(procfile_lineword(ff, 0, 33));
- // p->wchan = str2kernel_uint_t(procfile_lineword(ff, 0, 34));
- // p->nswap = str2kernel_uint_t(procfile_lineword(ff, 0, 35));
- // p->cnswap = str2kernel_uint_t(procfile_lineword(ff, 0, 36));
- // p->exit_signal = str2kernel_uint_t(procfile_lineword(ff, 0, 37));
- // p->processor = str2kernel_uint_t(procfile_lineword(ff, 0, 38));
- // p->rt_priority = str2kernel_uint_t(procfile_lineword(ff, 0, 39));
- // p->policy = str2kernel_uint_t(procfile_lineword(ff, 0, 40));
- // p->delayacct_blkio_ticks = str2kernel_uint_t(procfile_lineword(ff, 0, 41));
-
- if(enable_guest_charts) {
- pid_incremental_rate(stat, p->gtime, str2kernel_uint_t(procfile_lineword(ff, 0, 42)));
- pid_incremental_rate(stat, p->cgtime, str2kernel_uint_t(procfile_lineword(ff, 0, 43)));
-
- if (show_guest_time || p->gtime || p->cgtime) {
- p->utime -= (p->utime >= p->gtime) ? p->gtime : p->utime;
- p->cutime -= (p->cutime >= p->cgtime) ? p->cgtime : p->cutime;
- show_guest_time = 1;
- }
- }
-
- if(unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
- debug_log_int("READ PROC/PID/STAT: %s/proc/%d/stat, process: '%s' on target '%s' (dt=%llu) VALUES: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT ", threads=%d", netdata_configured_host_prefix, p->pid, p->comm, (p->target)?p->target->name:"UNSET", p->stat_collected_usec - p->last_stat_collected_usec, p->utime, p->stime, p->cutime, p->cstime, p->minflt, p->majflt, p->cminflt, p->cmajflt, p->num_threads);
-
- if(unlikely(global_iterations_counter == 1))
- clear_pid_stat(p, false);
-
- update_proc_state_count(p->state);
- return true;
-
-cleanup:
- clear_pid_stat(p, true);
- return false;
-}
-#endif // !__FreeBSD__ !__APPLE__
-
-int read_proc_pid_stat(struct pid_stat *p, void *ptr) {
- p->last_stat_collected_usec = p->stat_collected_usec;
- p->stat_collected_usec = now_monotonic_usec();
- calls_counter++;
-
- if(!read_proc_pid_stat_per_os(p, ptr))
- return 0;
-
- return 1;
-}
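
For reference, a minimal sketch of the four matching modes listed in the deleted assign_target_to_pid(): exact process name, prefix, suffix, and substring of the command line. The '*' pattern convention and the helper below are illustrative only; the real plugin pre-computes hashes and lengths for its targets.

/* Sketch: the four process-name matching modes (exact, prefix, suffix, cmdline substring). */
#include <stdbool.h>
#include <string.h>

static bool comm_matches(const char *pattern, const char *comm, const char *cmdline) {
    size_t plen = strlen(pattern);
    bool starts = plen && pattern[plen - 1] == '*';     // "name*"  -> comm starts with "name"
    bool ends   = plen && pattern[0] == '*';            // "*name"  -> comm ends with "name"

    char body[256];
    size_t blen = plen - (starts ? 1 : 0) - (ends ? 1 : 0);
    if (blen >= sizeof(body)) return false;             // also rejects the lone "*" pattern
    memcpy(body, pattern + (ends ? 1 : 0), blen);
    body[blen] = '\0';

    if (starts && ends)                                  // "*name*" -> anywhere in the cmdline
        return cmdline && strstr(cmdline, body) != NULL;
    if (starts)
        return strncmp(body, comm, blen) == 0;
    if (ends) {
        size_t clen = strlen(comm);
        return clen >= blen && strcmp(body, comm + clen - blen) == 0;
    }
    return strcmp(body, comm) == 0;                      // plain name -> exact match
}
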
diff --git a/src/collectors/apps.plugin/apps_proc_pid_status.c b/src/collectors/apps.plugin/apps_proc_pid_status.c
deleted file mode 100644
index 364d48047..000000000
--- a/src/collectors/apps.plugin/apps_proc_pid_status.c
+++ /dev/null
@@ -1,192 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-#if defined(__FreeBSD__)
-static inline bool read_proc_pid_status_per_os(struct pid_stat *p, void *ptr) {
- struct kinfo_proc *proc_info = (struct kinfo_proc *)ptr;
-
- p->uid = proc_info->ki_uid;
- p->gid = proc_info->ki_groups[0];
- p->status_vmsize = proc_info->ki_size / 1024; // in KiB
- p->status_vmrss = proc_info->ki_rssize * pagesize / 1024; // in KiB
- // TODO: what about shared and swap memory on FreeBSD?
- return true;
-}
-#endif
-
-#ifdef __APPLE__
-static inline bool read_proc_pid_status_per_os(struct pid_stat *p, void *ptr) {
- struct pid_info *pi = ptr;
-
- p->uid = pi->bsdinfo.pbi_uid;
- p->gid = pi->bsdinfo.pbi_gid;
- p->status_vmsize = pi->taskinfo.pti_virtual_size / 1024; // Convert bytes to KiB
- p->status_vmrss = pi->taskinfo.pti_resident_size / 1024; // Convert bytes to KiB
- // p->status_vmswap = rusageinfo.ri_swapins + rusageinfo.ri_swapouts; // This is not directly available, consider an alternative representation
- p->status_voluntary_ctxt_switches = pi->taskinfo.pti_csw;
- // p->status_nonvoluntary_ctxt_switches = taskinfo.pti_nivcsw;
-
- return true;
-}
-#endif // __APPLE__
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-struct arl_callback_ptr {
- struct pid_stat *p;
- procfile *ff;
- size_t line;
-};
-
-void arl_callback_status_uid(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
-
- //const char *real_uid = procfile_lineword(aptr->ff, aptr->line, 1);
- const char *effective_uid = procfile_lineword(aptr->ff, aptr->line, 2);
- //const char *saved_uid = procfile_lineword(aptr->ff, aptr->line, 3);
- //const char *filesystem_uid = procfile_lineword(aptr->ff, aptr->line, 4);
-
- if(likely(effective_uid && *effective_uid))
- aptr->p->uid = (uid_t)str2l(effective_uid);
-}
-
-void arl_callback_status_gid(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 5)) return;
-
- //const char *real_gid = procfile_lineword(aptr->ff, aptr->line, 1);
- const char *effective_gid = procfile_lineword(aptr->ff, aptr->line, 2);
- //const char *saved_gid = procfile_lineword(aptr->ff, aptr->line, 3);
- //const char *filesystem_gid = procfile_lineword(aptr->ff, aptr->line, 4);
-
- if(likely(effective_gid && *effective_gid))
-    aptr->p->gid = (gid_t)str2l(effective_gid);
-}
-
-void arl_callback_status_vmsize(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_vmsize = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_vmswap(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_vmswap = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_vmrss(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_vmrss = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_rssfile(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_rssfile = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_rssshmem(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 3)) return;
-
- aptr->p->status_rssshmem = str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1));
-}
-
-void arl_callback_status_voluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return;
-
- struct pid_stat *p = aptr->p;
- pid_incremental_rate(stat, p->status_voluntary_ctxt_switches, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)));
-}
-
-void arl_callback_status_nonvoluntary_ctxt_switches(const char *name, uint32_t hash, const char *value, void *dst) {
- (void)name; (void)hash; (void)value;
- struct arl_callback_ptr *aptr = (struct arl_callback_ptr *)dst;
- if(unlikely(procfile_linewords(aptr->ff, aptr->line) < 2)) return;
-
- struct pid_stat *p = aptr->p;
- pid_incremental_rate(stat, p->status_nonvoluntary_ctxt_switches, str2kernel_uint_t(procfile_lineword(aptr->ff, aptr->line, 1)));
-}
-
-static inline bool read_proc_pid_status_per_os(struct pid_stat *p, void *ptr __maybe_unused) {
- static struct arl_callback_ptr arl_ptr;
- static procfile *ff = NULL;
-
- if(unlikely(!p->status_arl)) {
- p->status_arl = arl_create("/proc/pid/status", NULL, 60);
- arl_expect_custom(p->status_arl, "Uid", arl_callback_status_uid, &arl_ptr);
- arl_expect_custom(p->status_arl, "Gid", arl_callback_status_gid, &arl_ptr);
- arl_expect_custom(p->status_arl, "VmSize", arl_callback_status_vmsize, &arl_ptr);
- arl_expect_custom(p->status_arl, "VmRSS", arl_callback_status_vmrss, &arl_ptr);
- arl_expect_custom(p->status_arl, "RssFile", arl_callback_status_rssfile, &arl_ptr);
- arl_expect_custom(p->status_arl, "RssShmem", arl_callback_status_rssshmem, &arl_ptr);
- arl_expect_custom(p->status_arl, "VmSwap", arl_callback_status_vmswap, &arl_ptr);
- arl_expect_custom(p->status_arl, "voluntary_ctxt_switches", arl_callback_status_voluntary_ctxt_switches, &arl_ptr);
- arl_expect_custom(p->status_arl, "nonvoluntary_ctxt_switches", arl_callback_status_nonvoluntary_ctxt_switches, &arl_ptr);
- }
-
- if(unlikely(!p->status_filename)) {
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/status", netdata_configured_host_prefix, p->pid);
- p->status_filename = strdupz(filename);
- }
-
- ff = procfile_reopen(ff, p->status_filename, (!ff)?" \t:,-()/":NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
- if(unlikely(!ff)) return false;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return false;
-
- calls_counter++;
-
- // let ARL use this pid
- arl_ptr.p = p;
- arl_ptr.ff = ff;
-
- size_t lines = procfile_lines(ff), l;
- arl_begin(p->status_arl);
-
- for(l = 0; l < lines ;l++) {
- // debug_log("CHECK: line %zu of %zu, key '%s' = '%s'", l, lines, procfile_lineword(ff, l, 0), procfile_lineword(ff, l, 1));
- arl_ptr.line = l;
- if(unlikely(arl_check(p->status_arl,
- procfile_lineword(ff, l, 0),
- procfile_lineword(ff, l, 1)))) break;
- }
-
- p->status_vmshared = p->status_rssfile + p->status_rssshmem;
-
- // debug_log("%s uid %d, gid %d, VmSize %zu, VmRSS %zu, RssFile %zu, RssShmem %zu, shared %zu", p->comm, (int)p->uid, (int)p->gid, p->status_vmsize, p->status_vmrss, p->status_rssfile, p->status_rssshmem, p->status_vmshared);
-
- return true;
-}
-#endif // !__FreeBSD__ !__APPLE__
-
-int read_proc_pid_status(struct pid_stat *p, void *ptr) {
- p->status_vmsize = 0;
- p->status_vmrss = 0;
- p->status_vmshared = 0;
- p->status_rssfile = 0;
- p->status_rssshmem = 0;
- p->status_vmswap = 0;
- p->status_voluntary_ctxt_switches = 0;
- p->status_nonvoluntary_ctxt_switches = 0;
-
- return read_proc_pid_status_per_os(p, ptr) ? 1 : 0;
-}
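
For reference, a minimal sketch of pulling VmRSS, RssFile and RssShmem (in kB) out of /proc/<pid>/status and deriving shared memory as RssFile + RssShmem, the same computation the deleted reader does after its ARL pass; the struct and helper names are illustrative.

/* Sketch: memory figures from /proc/<pid>/status, shared = RssFile + RssShmem (Linux only). */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct status_mem { unsigned long long vmrss, rssfile, rssshmem; };

static int read_status_mem(pid_t pid, struct status_mem *m) {
    char path[64], line[256];
    memset(m, 0, sizeof(*m));

    snprintf(path, sizeof(path), "/proc/%d/status", (int)pid);
    FILE *fp = fopen(path, "r");
    if (!fp) return -1;

    while (fgets(line, sizeof(line), fp)) {
        // each value is reported in kB, e.g. "VmRSS:     123456 kB"
        sscanf(line, "VmRSS: %llu", &m->vmrss);
        sscanf(line, "RssFile: %llu", &m->rssfile);
        sscanf(line, "RssShmem: %llu", &m->rssshmem);
    }
    fclose(fp);
    return 0;
}

static unsigned long long shared_kb(const struct status_mem *m) {
    return m->rssfile + m->rssshmem;                    // what the reader stores as vmshared
}
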
diff --git a/src/collectors/apps.plugin/apps_proc_pids.c b/src/collectors/apps.plugin/apps_proc_pids.c
deleted file mode 100644
index b53060d60..000000000
--- a/src/collectors/apps.plugin/apps_proc_pids.c
+++ /dev/null
@@ -1,720 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-static struct pid_stat **all_pids = NULL;
-size_t all_pids_count = 0; // the number of processes running
-
-struct pid_stat *root_of_pids = NULL; // global linked list of all processes running
-
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
-// Another pre-allocated list of all possible pids.
-// We need it to assign them a unique sortlist id, so that we
-// read parents before children. This is needed to prevent a situation where
-// a child is found running, but by the time we read its parent, the child has
-// exited and its parent has already accumulated its resources.
-pid_t *all_pids_sortlist = NULL;
-#endif
-
-void pids_init(void) {
-#if (ALL_PIDS_ARE_READ_INSTANTLY == 0)
- all_pids_sortlist = callocz(sizeof(pid_t), (size_t)pid_max + 1);
-#endif
-
- all_pids = callocz(sizeof(struct pid_stat *), (size_t) pid_max + 1);
-}
-
-inline struct pid_stat *find_pid_entry(pid_t pid) {
- return all_pids[pid];
-}
-
-static inline struct pid_stat *get_or_allocate_pid_entry(pid_t pid) {
- struct pid_stat *p = find_pid_entry(pid);
- if(likely(p))
- return p;
-
- p = callocz(sizeof(struct pid_stat), 1);
- p->fds = mallocz(sizeof(struct pid_fd) * MAX_SPARE_FDS);
- p->fds_size = MAX_SPARE_FDS;
- init_pid_fds(p, 0, p->fds_size);
- p->pid = pid;
-
- DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(root_of_pids, p, prev, next);
- all_pids[pid] = p;
- all_pids_count++;
-
- return p;
-}
-
-static inline void del_pid_entry(pid_t pid) {
- struct pid_stat *p = find_pid_entry(pid);
-
- if(unlikely(!p)) {
- netdata_log_error("attempted to free pid %d that is not allocated.", pid);
- return;
- }
-
- debug_log("process %d %s exited, deleting it.", pid, p->comm);
-
- DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(root_of_pids, p, prev, next);
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
- {
- size_t i;
- for(i = 0; i < p->fds_size; i++)
- if(p->fds[i].filename)
- freez(p->fds[i].filename);
- }
- arl_free(p->status_arl);
-#endif
-
- freez(p->fds);
- freez(p->fds_dirname);
- freez(p->stat_filename);
- freez(p->status_filename);
- freez(p->limits_filename);
- freez(p->io_filename);
- freez(p->cmdline_filename);
- freez(p->cmdline);
- freez(p);
-
- all_pids[pid] = NULL;
- all_pids_count--;
-}
-
-static inline int collect_data_for_pid(pid_t pid, void *ptr) {
- if(unlikely(pid < 0 || pid > pid_max)) {
- netdata_log_error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max);
- return 0;
- }
-
- struct pid_stat *p = get_or_allocate_pid_entry(pid);
- if(unlikely(!p || p->read)) return 0;
- p->read = true;
-
- // debug_log("Reading process %d (%s), sortlist %d", p->pid, p->comm, p->sortlist);
-
- // --------------------------------------------------------------------
- // /proc/<pid>/stat
-
- if(unlikely(!managed_log(p, PID_LOG_STAT, read_proc_pid_stat(p, ptr))))
- // there is no reason to proceed if we cannot get its status
- return 0;
-
- // check its parent pid
- if(unlikely(p->ppid < 0 || p->ppid > pid_max)) {
- netdata_log_error("Pid %d (command '%s') states invalid parent pid %d. Using 0.", pid, p->comm, p->ppid);
- p->ppid = 0;
- }
-
- // --------------------------------------------------------------------
- // /proc/<pid>/io
-
- managed_log(p, PID_LOG_IO, read_proc_pid_io(p, ptr));
-
- // --------------------------------------------------------------------
- // /proc/<pid>/status
-
- if(unlikely(!managed_log(p, PID_LOG_STATUS, read_proc_pid_status(p, ptr))))
- // there is no reason to proceed if we cannot get its status
- return 0;
-
- // --------------------------------------------------------------------
- // /proc/<pid>/fd
-
- if(enable_file_charts) {
- managed_log(p, PID_LOG_FDS, read_pid_file_descriptors(p, ptr));
- managed_log(p, PID_LOG_LIMITS, read_proc_pid_limits(p, ptr));
- }
-
- // --------------------------------------------------------------------
- // done!
-
- if(unlikely(debug_enabled && include_exited_childs && all_pids_count && p->ppid && all_pids[p->ppid] && !all_pids[p->ppid]->read))
- debug_log("Read process %d (%s) sortlisted %d, but its parent %d (%s) sortlisted %d, is not read", p->pid, p->comm, p->sortlist, all_pids[p->ppid]->pid, all_pids[p->ppid]->comm, all_pids[p->ppid]->sortlist);
-
- // mark it as updated
- p->updated = true;
- p->keep = false;
- p->keeploops = 0;
-
- return 1;
-}
-
-void cleanup_exited_pids(void) {
- size_t c;
- struct pid_stat *p = NULL;
-
- for(p = root_of_pids; p ;) {
- if(!p->updated && (!p->keep || p->keeploops > 0)) {
- if(unlikely(debug_enabled && (p->keep || p->keeploops)))
- debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
-
- for(c = 0; c < p->fds_size; c++)
- if(p->fds[c].fd > 0) {
- file_descriptor_not_used(p->fds[c].fd);
- clear_pid_fd(&p->fds[c]);
- }
-
- pid_t r = p->pid;
- p = p->next;
- del_pid_entry(r);
- }
- else {
- if(unlikely(p->keep)) p->keeploops++;
- p->keep = false;
- p = p->next;
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-
-static inline void link_all_processes_to_their_parents(void) {
- struct pid_stat *p, *pp;
-
- // link all children to their parents
- // and update children count on parents
- for(p = root_of_pids; p ; p = p->next) {
- // for each process found
-
- p->sortlist = 0;
- p->parent = NULL;
-
- if(unlikely(!p->ppid)) {
- //unnecessary code from apps_plugin.c
- //p->parent = NULL;
- continue;
- }
-
- pp = all_pids[p->ppid];
- if(likely(pp)) {
- p->parent = pp;
- pp->children_count++;
-
- if(unlikely(debug_enabled || (p->target && p->target->debug_enabled)))
- debug_log_int("child %d (%s, %s) on target '%s' has parent %d (%s, %s). Parent: utime=" KERNEL_UINT_FORMAT ", stime=" KERNEL_UINT_FORMAT ", gtime=" KERNEL_UINT_FORMAT ", minflt=" KERNEL_UINT_FORMAT ", majflt=" KERNEL_UINT_FORMAT ", cutime=" KERNEL_UINT_FORMAT ", cstime=" KERNEL_UINT_FORMAT ", cgtime=" KERNEL_UINT_FORMAT ", cminflt=" KERNEL_UINT_FORMAT ", cmajflt=" KERNEL_UINT_FORMAT "", p->pid, p->comm, p->updated?"running":"exited", (p->target)?p->target->name:"UNSET", pp->pid, pp->comm, pp->updated?"running":"exited", pp->utime, pp->stime, pp->gtime, pp->minflt, pp->majflt, pp->cutime, pp->cstime, pp->cgtime, pp->cminflt, pp->cmajflt);
- }
- else {
- p->parent = NULL;
-            netdata_log_error("pid %d %s states parent %d, but the latter does not exist.", p->pid, p->comm, p->ppid);
- }
- }
-}
-
-// ----------------------------------------------------------------------------
-
-static inline int debug_print_process_and_parents(struct pid_stat *p, usec_t time) {
- char *prefix = "\\_ ";
- int indent = 0;
-
- if(p->parent)
- indent = debug_print_process_and_parents(p->parent, p->stat_collected_usec);
- else
- prefix = " > ";
-
- char buffer[indent + 1];
- int i;
-
- for(i = 0; i < indent ;i++) buffer[i] = ' ';
- buffer[i] = '\0';
-
- fprintf(stderr, " %s %s%s (%d %s %"PRIu64""
- , buffer
- , prefix
- , p->comm
- , p->pid
- , p->updated?"running":"exited"
- , p->stat_collected_usec - time
- );
-
- if(p->utime) fprintf(stderr, " utime=" KERNEL_UINT_FORMAT, p->utime);
- if(p->stime) fprintf(stderr, " stime=" KERNEL_UINT_FORMAT, p->stime);
- if(p->gtime) fprintf(stderr, " gtime=" KERNEL_UINT_FORMAT, p->gtime);
- if(p->cutime) fprintf(stderr, " cutime=" KERNEL_UINT_FORMAT, p->cutime);
- if(p->cstime) fprintf(stderr, " cstime=" KERNEL_UINT_FORMAT, p->cstime);
- if(p->cgtime) fprintf(stderr, " cgtime=" KERNEL_UINT_FORMAT, p->cgtime);
- if(p->minflt) fprintf(stderr, " minflt=" KERNEL_UINT_FORMAT, p->minflt);
- if(p->cminflt) fprintf(stderr, " cminflt=" KERNEL_UINT_FORMAT, p->cminflt);
- if(p->majflt) fprintf(stderr, " majflt=" KERNEL_UINT_FORMAT, p->majflt);
- if(p->cmajflt) fprintf(stderr, " cmajflt=" KERNEL_UINT_FORMAT, p->cmajflt);
- fprintf(stderr, ")\n");
-
- return indent + 1;
-}
-
-static inline void debug_print_process_tree(struct pid_stat *p, char *msg __maybe_unused) {
- debug_log("%s: process %s (%d, %s) with parents:", msg, p->comm, p->pid, p->updated?"running":"exited");
- debug_print_process_and_parents(p, p->stat_collected_usec);
-}
-
-static inline void debug_find_lost_child(struct pid_stat *pe, kernel_uint_t lost, int type) {
- int found = 0;
- struct pid_stat *p = NULL;
-
- for(p = root_of_pids; p ; p = p->next) {
- if(p == pe) continue;
-
- switch(type) {
- case 1:
- if(p->cminflt > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
-
- case 2:
- if(p->cmajflt > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
-
- case 3:
- if(p->cutime > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
-
- case 4:
- if(p->cstime > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
-
- case 5:
- if(p->cgtime > lost) {
- fprintf(stderr, " > process %d (%s) could use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", p->pid, p->comm, lost, pe->pid, pe->comm);
- found++;
- }
- break;
- }
- }
-
- if(!found) {
- switch(type) {
- case 1:
- fprintf(stderr, " > cannot find any process to use the lost exited child minflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
-
- case 2:
- fprintf(stderr, " > cannot find any process to use the lost exited child majflt " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
-
- case 3:
- fprintf(stderr, " > cannot find any process to use the lost exited child utime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
-
- case 4:
- fprintf(stderr, " > cannot find any process to use the lost exited child stime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
-
- case 5:
- fprintf(stderr, " > cannot find any process to use the lost exited child gtime " KERNEL_UINT_FORMAT " of process %d (%s)\n", lost, pe->pid, pe->comm);
- break;
- }
- }
-}
-
-static inline kernel_uint_t remove_exited_child_from_parent(kernel_uint_t *field, kernel_uint_t *pfield) {
- kernel_uint_t absorbed = 0;
-
- if(*field > *pfield) {
- absorbed += *pfield;
- *field -= *pfield;
- *pfield = 0;
- }
- else {
- absorbed += *field;
- *pfield -= *field;
- *field = 0;
- }
-
- return absorbed;
-}
-
-static inline void process_exited_pids() {
- struct pid_stat *p;
-
- for(p = root_of_pids; p ; p = p->next) {
- if(p->updated || !p->stat_collected_usec)
- continue;
-
- kernel_uint_t utime = (p->utime_raw + p->cutime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
- kernel_uint_t stime = (p->stime_raw + p->cstime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
- kernel_uint_t gtime = (p->gtime_raw + p->cgtime_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
- kernel_uint_t minflt = (p->minflt_raw + p->cminflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
- kernel_uint_t majflt = (p->majflt_raw + p->cmajflt_raw) * (USEC_PER_SEC * RATES_DETAIL) / (p->stat_collected_usec - p->last_stat_collected_usec);
-
- if(utime + stime + gtime + minflt + majflt == 0)
- continue;
-
- if(unlikely(debug_enabled)) {
- debug_log("Absorb %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
- , p->comm
- , p->pid
- , p->updated?"running":"exited"
- , utime
- , stime
- , gtime
- , minflt
- , majflt
- );
- debug_print_process_tree(p, "Searching parents");
- }
-
- struct pid_stat *pp;
- for(pp = p->parent; pp ; pp = pp->parent) {
- if(!pp->updated) continue;
-
- kernel_uint_t absorbed;
- absorbed = remove_exited_child_from_parent(&utime, &pp->cutime);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " utime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, utime);
-
- absorbed = remove_exited_child_from_parent(&stime, &pp->cstime);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " stime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, stime);
-
- absorbed = remove_exited_child_from_parent(&gtime, &pp->cgtime);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " gtime (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, gtime);
-
- absorbed = remove_exited_child_from_parent(&minflt, &pp->cminflt);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " minflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, minflt);
-
- absorbed = remove_exited_child_from_parent(&majflt, &pp->cmajflt);
- if(unlikely(debug_enabled && absorbed))
- debug_log(" > process %s (%d %s) absorbed " KERNEL_UINT_FORMAT " majflt (remaining: " KERNEL_UINT_FORMAT ")", pp->comm, pp->pid, pp->updated?"running":"exited", absorbed, majflt);
- }
-
- if(unlikely(utime + stime + gtime + minflt + majflt > 0)) {
- if(unlikely(debug_enabled)) {
- if(utime) debug_find_lost_child(p, utime, 3);
- if(stime) debug_find_lost_child(p, stime, 4);
- if(gtime) debug_find_lost_child(p, gtime, 5);
- if(minflt) debug_find_lost_child(p, minflt, 1);
- if(majflt) debug_find_lost_child(p, majflt, 2);
- }
-
- p->keep = true;
-
- debug_log(" > remaining resources - KEEP - for another loop: %s (%d %s total resources: utime=" KERNEL_UINT_FORMAT " stime=" KERNEL_UINT_FORMAT " gtime=" KERNEL_UINT_FORMAT " minflt=" KERNEL_UINT_FORMAT " majflt=" KERNEL_UINT_FORMAT ")"
- , p->comm
- , p->pid
- , p->updated?"running":"exited"
- , utime
- , stime
- , gtime
- , minflt
- , majflt
- );
-
- for(pp = p->parent; pp ; pp = pp->parent) {
- if(pp->updated) break;
- pp->keep = true;
-
- debug_log(" > - KEEP - parent for another loop: %s (%d %s)"
- , pp->comm
- , pp->pid
- , pp->updated?"running":"exited"
- );
- }
-
- p->utime_raw = utime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->stime_raw = stime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->gtime_raw = gtime * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->minflt_raw = minflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->majflt_raw = majflt * (p->stat_collected_usec - p->last_stat_collected_usec) / (USEC_PER_SEC * RATES_DETAIL);
- p->cutime_raw = p->cstime_raw = p->cgtime_raw = p->cminflt_raw = p->cmajflt_raw = 0;
-
- debug_log(" ");
- }
- else
- debug_log(" > totally absorbed - DONE - %s (%d %s)"
- , p->comm
- , p->pid
- , p->updated?"running":"exited"
- );
- }
-}
-
-// ----------------------------------------------------------------------------
-
-// 1. read all files in /proc
-// 2. for each numeric directory:
-// i. read /proc/pid/stat
-// ii. read /proc/pid/status
-// iii. read /proc/pid/io (requires root access)
-//    iv.  read the entries in directory /proc/pid/fd (requires root access)
-// for each entry:
-// a. find or create a struct file_descriptor
-// b. cleanup any old/unused file_descriptors
-
-// after all these, some pids may be linked to targets, while others may not
-
-// in case of errors, only 1 in every 1000 errors is printed
-// to avoid filling up all disk space
-// if debug is enabled, all errors are printed
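
For reference, a minimal sketch of the outer loop the numbered steps above describe: walk /proc, treat every numeric directory name as a pid, and hand it to a per-pid reader. The reader below is an illustrative stub, not the plugin's own collect_data_for_pid().

/* Sketch: enumerate /proc and visit each pid once per iteration (Linux only). */
#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

static void read_one_pid(int pid) {
    char path[64];
    snprintf(path, sizeof(path), "/proc/%d/stat", pid);    // then status, io, fd/ ...
    FILE *fp = fopen(path, "r");
    if (!fp) return;                                        // the process may already be gone
    /* parse and accumulate per-pid values here */
    fclose(fp);
}

int main(void) {
    DIR *proc = opendir("/proc");
    if (!proc) return 1;

    struct dirent *de;
    while ((de = readdir(proc))) {
        if (!isdigit((unsigned char)de->d_name[0])) continue;   // numeric names are pids
        read_one_pid(atoi(de->d_name));
    }
    closedir(proc);
    return 0;
}
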
-
-static inline void mark_pid_as_unread(struct pid_stat *p) {
- p->read = false; // mark it as not read, so that collect_data_for_pid() will read it
- p->updated = false;
- p->merged = false;
- p->children_count = 0;
- p->parent = NULL;
-}
-
-#if defined(__FreeBSD__) || defined(__APPLE__)
-static inline void get_current_time(void) {
- struct timeval current_time;
- gettimeofday(&current_time, NULL);
- system_current_time_ut = timeval_usec(&current_time);
-}
-#endif
-
-#if defined(__FreeBSD__)
-static inline bool collect_data_for_all_pids_per_os(void) {
- // Mark all processes as unread before collecting new data
- struct pid_stat *p = NULL;
- if(all_pids_count) {
- for(p = root_of_pids; p ; p = p->next)
- mark_pid_as_unread(p);
- }
-
- int i, procnum;
-
- static size_t procbase_size = 0;
- static struct kinfo_proc *procbase = NULL;
-
- size_t new_procbase_size;
-
- int mib[3] = { CTL_KERN, KERN_PROC, KERN_PROC_PROC };
- if (unlikely(sysctl(mib, 3, NULL, &new_procbase_size, NULL, 0))) {
- netdata_log_error("sysctl error: Can't get processes data size");
- return false;
- }
-
-    // give it some headroom for processes that may be started
-    // in the meantime.
- new_procbase_size += 100 * sizeof(struct kinfo_proc);
-
- // increase the buffer if needed
- if(new_procbase_size > procbase_size) {
- procbase_size = new_procbase_size;
- procbase = reallocz(procbase, procbase_size);
- }
-
-    // sysctl() reads the available buffer size from new_procbase_size
-    // and writes back into it the amount of data it actually filled in
- new_procbase_size = procbase_size;
-
- // get the processes from the system
- if (unlikely(sysctl(mib, 3, procbase, &new_procbase_size, NULL, 0))) {
- netdata_log_error("sysctl error: Can't get processes data");
- return false;
- }
-
- // based on the amount of data filled in
- // calculate the number of processes we got
- procnum = new_procbase_size / sizeof(struct kinfo_proc);
-
- get_current_time();
-
- for (i = 0 ; i < procnum ; ++i) {
- pid_t pid = procbase[i].ki_pid;
- if (pid <= 0) continue;
- collect_data_for_pid(pid, &procbase[i]);
- }
-
- return true;
-}
-#endif // __FreeBSD__
-
-#if defined(__APPLE__)
-static inline bool collect_data_for_all_pids_per_os(void) {
- // Mark all processes as unread before collecting new data
- struct pid_stat *p;
- if(all_pids_count) {
- for(p = root_of_pids; p; p = p->next)
- mark_pid_as_unread(p);
- }
-
- static pid_t *pids = NULL;
- static int allocatedProcessCount = 0;
-
- // Get the number of processes
- int numberOfProcesses = proc_listpids(PROC_ALL_PIDS, 0, NULL, 0);
- if (numberOfProcesses <= 0) {
- netdata_log_error("Failed to retrieve the process count");
- return false;
- }
-
- // Allocate or reallocate space to hold all the process IDs if necessary
- if (numberOfProcesses > allocatedProcessCount) {
- // Allocate additional space to avoid frequent reallocations
- allocatedProcessCount = numberOfProcesses + 100;
- pids = reallocz(pids, allocatedProcessCount * sizeof(pid_t));
- }
-
- // this is required, otherwise the PIDs become totally random
- memset(pids, 0, allocatedProcessCount * sizeof(pid_t));
-
- // get the list of PIDs
- numberOfProcesses = proc_listpids(PROC_ALL_PIDS, 0, pids, allocatedProcessCount * sizeof(pid_t));
- if (numberOfProcesses <= 0) {
- netdata_log_error("Failed to retrieve the process IDs");
- return false;
- }
-
- get_current_time();
-
- // Collect data for each process
- for (int i = 0; i < numberOfProcesses; ++i) {
- pid_t pid = pids[i];
- if (pid <= 0) continue;
-
- struct pid_info pi = { 0 };
-
- int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, pid};
-
- size_t procSize = sizeof(pi.proc);
- if(sysctl(mib, 4, &pi.proc, &procSize, NULL, 0) == -1) {
- netdata_log_error("Failed to get proc for PID %d", pid);
- continue;
- }
- if(procSize == 0) // no such process
- continue;
-
- int st = proc_pidinfo(pid, PROC_PIDTASKINFO, 0, &pi.taskinfo, sizeof(pi.taskinfo));
- if (st <= 0) {
- netdata_log_error("Failed to get task info for PID %d", pid);
- continue;
- }
-
- st = proc_pidinfo(pid, PROC_PIDTBSDINFO, 0, &pi.bsdinfo, sizeof(pi.bsdinfo));
- if (st <= 0) {
- netdata_log_error("Failed to get BSD info for PID %d", pid);
- continue;
- }
-
- st = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t *)&pi.rusageinfo);
- if (st < 0) {
- netdata_log_error("Failed to get resource usage info for PID %d", pid);
- continue;
- }
-
- collect_data_for_pid(pid, &pi);
- }
-
- return true;
-}
-#endif // __APPLE__
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-static int compar_pid(const void *pid1, const void *pid2) {
-
- struct pid_stat *p1 = all_pids[*((pid_t *)pid1)];
- struct pid_stat *p2 = all_pids[*((pid_t *)pid2)];
-
- if(p1->sortlist > p2->sortlist)
- return -1;
- else
- return 1;
-}
-
-static inline bool collect_data_for_all_pids_per_os(void) {
- struct pid_stat *p = NULL;
-
- // clear process state counter
- memset(proc_state_count, 0, sizeof proc_state_count);
-
- if(all_pids_count) {
- size_t slc = 0;
- for(p = root_of_pids; p ; p = p->next) {
- mark_pid_as_unread(p);
- all_pids_sortlist[slc++] = p->pid;
- }
-
- if(unlikely(slc != all_pids_count)) {
- netdata_log_error("Internal error: I was thinking I had %zu processes in my arrays, but it seems there are %zu.", all_pids_count, slc);
- all_pids_count = slc;
- }
-
- if(include_exited_childs) {
-            // Read parents before children
- // This is needed to prevent a situation where
- // a child is found running, but until we read
- // its parent, it has exited and its parent
- // has accumulated its resources.
-
- qsort((void *)all_pids_sortlist, (size_t)all_pids_count, sizeof(pid_t), compar_pid);
-
-            // we read all running processes, parents first;
-            // collect_data_for_pid() is smart enough
-            // not to read the same pid twice per iteration
- for(slc = 0; slc < all_pids_count; slc++) {
- collect_data_for_pid(all_pids_sortlist[slc], NULL);
- }
- }
- }
-
- static char uptime_filename[FILENAME_MAX + 1] = "";
- if(*uptime_filename == '\0')
- snprintfz(uptime_filename, FILENAME_MAX, "%s/proc/uptime", netdata_configured_host_prefix);
-
- system_uptime_secs = (kernel_uint_t)(uptime_msec(uptime_filename) / MSEC_PER_SEC);
-
- char dirname[FILENAME_MAX + 1];
-
- snprintfz(dirname, FILENAME_MAX, "%s/proc", netdata_configured_host_prefix);
- DIR *dir = opendir(dirname);
- if(!dir) return false;
-
- struct dirent *de = NULL;
-
- while((de = readdir(dir))) {
- char *endptr = de->d_name;
-
- if(unlikely(de->d_type != DT_DIR || de->d_name[0] < '0' || de->d_name[0] > '9'))
- continue;
-
- pid_t pid = (pid_t) strtoul(de->d_name, &endptr, 10);
-
- // make sure we read a valid number
- if(unlikely(endptr == de->d_name || *endptr != '\0'))
- continue;
-
- collect_data_for_pid(pid, NULL);
- }
- closedir(dir);
-
- return true;
-}
-#endif // !__FreeBSD__ && !__APPLE__
-
-bool collect_data_for_all_pids(void) {
- if(!collect_data_for_all_pids_per_os())
- return false;
-
- if(!all_pids_count)
- return false;
-
-    // we need /proc/stat to normalize the cpu consumption of the exited children
- read_global_time();
-
- // build the process tree
- link_all_processes_to_their_parents();
-
-    // normally this is already done during collection,
-    // however some processes may have exited while we were collecting values,
-    // so let's find the exited ones;
-    // we do this by collecting the ownership of each process:
-    // if we manage to get the ownership, the process is still running
- process_exited_pids();
-
- return true;
-}
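
The exited-children handling above converts the per-second rates kept for a dead child back into raw per-interval amounts before absorbing them into the parent. A minimal sketch of that conversion, assuming the kernel_uint_t/usec_t types and the USEC_PER_SEC and RATES_DETAIL macros from apps_plugin.h:

    // sketch only: convert a rate expressed in (units * RATES_DETAIL) per second
    // back into the raw amount accumulated during an interval of 'elapsed_ut' microseconds
    static inline kernel_uint_t rate_to_interval_raw(kernel_uint_t rate, usec_t elapsed_ut) {
        return rate * elapsed_ut / (USEC_PER_SEC * RATES_DETAIL);
    }
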
diff --git a/src/collectors/apps.plugin/apps_proc_stat.c b/src/collectors/apps.plugin/apps_proc_stat.c
deleted file mode 100644
index 8564ddd55..000000000
--- a/src/collectors/apps.plugin/apps_proc_stat.c
+++ /dev/null
@@ -1,154 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-#if defined(__APPLE__)
-int read_global_time(void) {
- static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0;
- static usec_t collected_usec = 0, last_collected_usec = 0;
-
- host_cpu_load_info_data_t cpuinfo;
- mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
-
- if (host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO, (host_info_t)&cpuinfo, &count) != KERN_SUCCESS) {
- // Handle error
- goto cleanup;
- }
-
- last_collected_usec = collected_usec;
- collected_usec = now_monotonic_usec();
-
- calls_counter++;
-
- // Convert ticks to time
- // Note: MacOS does not separate nice time from user time in the CPU stats, so you might need to adjust this logic
- kernel_uint_t global_ntime = 0; // Assuming you want to keep track of nice time separately
-
- incremental_rate(global_utime, utime_raw, cpuinfo.cpu_ticks[CPU_STATE_USER] + cpuinfo.cpu_ticks[CPU_STATE_NICE], collected_usec, last_collected_usec);
- incremental_rate(global_ntime, ntime_raw, cpuinfo.cpu_ticks[CPU_STATE_NICE], collected_usec, last_collected_usec);
- incremental_rate(global_stime, stime_raw, cpuinfo.cpu_ticks[CPU_STATE_SYSTEM], collected_usec, last_collected_usec);
-
- global_utime += global_ntime;
-
- if(unlikely(global_iterations_counter == 1)) {
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- }
-
- return 1;
-
-cleanup:
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- return 0;
-}
-#endif // __APPLE__
-
-
-#if defined(__FreeBSD__)
-int read_global_time(void) {
- static kernel_uint_t utime_raw = 0, stime_raw = 0, ntime_raw = 0;
- static usec_t collected_usec = 0, last_collected_usec = 0;
- long cp_time[CPUSTATES];
-
- if (unlikely(CPUSTATES != 5)) {
- goto cleanup;
- } else {
- static int mib[2] = {0, 0};
-
- if (unlikely(GETSYSCTL_SIMPLE("kern.cp_time", mib, cp_time))) {
- goto cleanup;
- }
- }
-
- last_collected_usec = collected_usec;
- collected_usec = now_monotonic_usec();
-
- calls_counter++;
-
-    // temporary - global_ntime is added to global_utime below
- kernel_uint_t global_ntime = 0;
-
- incremental_rate(global_utime, utime_raw, cp_time[0] * 100LLU / system_hz, collected_usec, last_collected_usec);
- incremental_rate(global_ntime, ntime_raw, cp_time[1] * 100LLU / system_hz, collected_usec, last_collected_usec);
- incremental_rate(global_stime, stime_raw, cp_time[2] * 100LLU / system_hz, collected_usec, last_collected_usec);
-
- global_utime += global_ntime;
-
- if(unlikely(global_iterations_counter == 1)) {
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- }
-
- return 1;
-
-cleanup:
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- return 0;
-}
-#endif // __FreeBSD__
-
-#if !defined(__FreeBSD__) && !defined(__APPLE__)
-int read_global_time(void) {
- static char filename[FILENAME_MAX + 1] = "";
- static procfile *ff = NULL;
- static kernel_uint_t utime_raw = 0, stime_raw = 0, gtime_raw = 0, gntime_raw = 0, ntime_raw = 0;
- static usec_t collected_usec = 0, last_collected_usec = 0;
-
- if(unlikely(!ff)) {
- snprintfz(filename, FILENAME_MAX, "%s/proc/stat", netdata_configured_host_prefix);
- ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) goto cleanup;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) goto cleanup;
-
- last_collected_usec = collected_usec;
- collected_usec = now_monotonic_usec();
-
- calls_counter++;
-
-    // temporary - global_ntime is added to global_utime below
- kernel_uint_t global_ntime = 0;
-
- incremental_rate(global_utime, utime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 1)), collected_usec, last_collected_usec);
- incremental_rate(global_ntime, ntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 2)), collected_usec, last_collected_usec);
- incremental_rate(global_stime, stime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 3)), collected_usec, last_collected_usec);
- incremental_rate(global_gtime, gtime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 10)), collected_usec, last_collected_usec);
-
- global_utime += global_ntime;
-
- if(enable_guest_charts) {
-        // temporary - global_gntime is added to global_gtime below
- kernel_uint_t global_gntime = 0;
-
- // guest nice time, on guest time
- incremental_rate(global_gntime, gntime_raw, str2kernel_uint_t(procfile_lineword(ff, 0, 11)), collected_usec, last_collected_usec);
-
- global_gtime += global_gntime;
-
- // remove guest time from user time
- global_utime -= (global_utime > global_gtime) ? global_gtime : global_utime;
- }
-
- if(unlikely(global_iterations_counter == 1)) {
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- }
-
- return 1;
-
-cleanup:
- global_utime = 0;
- global_stime = 0;
- global_gtime = 0;
- return 0;
-}
-#endif // !__FreeBSD__ !__APPLE__
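
The read_global_time() variants above feed raw CPU tick counters into an incremental_rate() helper whose definition is outside this hunk. As a rough sketch of the semantics implied by its call sites (current reading minus previous reading, turned into a per-second rate over the elapsed wall-clock microseconds):

    // rough sketch only, assuming the semantics implied by the call sites above;
    // 'rate' receives the per-second rate scaled by RATES_DETAIL,
    // 'last_raw' keeps the current sample for the next iteration
    #define INCREMENTAL_RATE_SKETCH(rate, last_raw, new_raw, now_ut, last_ut) do { \
        (rate) = ((new_raw) - (last_raw)) * (USEC_PER_SEC * RATES_DETAIL) / ((now_ut) - (last_ut)); \
        (last_raw) = (new_raw); \
    } while(0)
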
diff --git a/src/collectors/apps.plugin/apps_targets.c b/src/collectors/apps.plugin/apps_targets.c
index 7deaa798c..46db128cc 100644
--- a/src/collectors/apps.plugin/apps_targets.c
+++ b/src/collectors/apps.plugin/apps_targets.c
@@ -2,199 +2,370 @@
#include "apps_plugin.h"
-// ----------------------------------------------------------------------------
-// apps_groups.conf
-// aggregate all processes in groups, to have a limited number of dimensions
+pid_t INIT_PID = OS_INIT_PID;
-struct target *get_users_target(uid_t uid) {
- struct target *w;
- for(w = users_root_target ; w ; w = w->next)
- if(w->uid == uid) return w;
+static STRING *get_clean_name(STRING *name) {
+ char buf[string_strlen(name) + 1];
+ memcpy(buf, string2str(name), string_strlen(name) + 1);
+ netdata_fix_chart_name(buf);
- w = callocz(sizeof(struct target), 1);
- snprintfz(w->compare, MAX_COMPARE_NAME, "%u", uid);
- w->comparehash = simple_hash(w->compare);
- w->comparelen = strlen(w->compare);
+ for (char *d = buf; *d ; d++)
+ if (*d == '.') *d = '_';
- snprintfz(w->id, MAX_NAME, "%u", uid);
- w->idhash = simple_hash(w->id);
+ return string_strdupz(buf);
+}
- struct user_or_group_id user_id_to_find = {
- .id = {
- .uid = uid,
- }
- };
- struct user_or_group_id *user_or_group_id = user_id_find(&user_id_to_find);
+static inline STRING *get_numeric_string(uint64_t n) {
+ char buf[UINT64_MAX_LENGTH];
+ print_uint64(buf, n);
+ return string_strdupz(buf);
+}
- if(user_or_group_id && user_or_group_id->name && *user_or_group_id->name)
- snprintfz(w->name, MAX_NAME, "%s", user_or_group_id->name);
+struct target *find_target_by_name(struct target *base, const char *name) {
+ struct target *t;
+ for(t = base; t ; t = t->next) {
+ if (string_strcmp(t->name, name) == 0)
+ return t;
+ }
- else {
- struct passwd *pw = getpwuid(uid);
- if(!pw || !pw->pw_name || !*pw->pw_name)
- snprintfz(w->name, MAX_NAME, "%u", uid);
+ return NULL;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Process managers and aggregators
+
+struct comm_list {
+ APPS_MATCH match;
+};
+
+struct managed_list {
+ size_t used;
+ size_t size;
+ struct comm_list *array;
+};
+
+static struct {
+ struct managed_list managers;
+ struct managed_list aggregators;
+ struct managed_list interpreters;
+} tree = {
+ .managers = {
+ .array = NULL,
+ .size = 0,
+ .used = 0,
+ },
+ .aggregators = {
+ .array = NULL,
+ .size = 0,
+ .used = 0,
+ }
+};
+
+static void managed_list_clear(struct managed_list *list) {
+ for(size_t c = 0; c < list->used ; c++)
+ pid_match_cleanup(&list->array[c].match);
+
+ freez(list->array);
+ list->array = NULL;
+ list->used = 0;
+ list->size = 0;
+}
+
+static void managed_list_add(struct managed_list *list, const char *s) {
+ if(list->used >= list->size) {
+ if(!list->size)
+ list->size = 16;
else
- snprintfz(w->name, MAX_NAME, "%s", pw->pw_name);
+ list->size *= 2;
+ list->array = reallocz(list->array, sizeof(*list->array) * list->size);
+ }
+
+ list->array[list->used++].match = pid_match_create(s);
+}
+
+static STRING *KernelAggregator = NULL;
+
+void apps_managers_and_aggregators_init(void) {
+ KernelAggregator = string_strdupz("kernel");
+
+ managed_list_clear(&tree.managers);
+#if defined(OS_LINUX)
+ managed_list_add(&tree.managers, "init"); // linux systems
+ managed_list_add(&tree.managers, "systemd"); // lxc containers and host systems (this also catches "systemd --user")
+ managed_list_add(&tree.managers, "containerd-shim-runc-v2"); // docker containers
+ managed_list_add(&tree.managers, "docker-init"); // docker containers
+ managed_list_add(&tree.managers, "tini"); // docker containers (https://github.com/krallin/tini)
+ managed_list_add(&tree.managers, "dumb-init"); // some docker containers use this
+ managed_list_add(&tree.managers, "openrc-run.sh"); // openrc
+ managed_list_add(&tree.managers, "crond"); // linux crond
+ managed_list_add(&tree.managers, "gnome-shell"); // gnome user applications
+ managed_list_add(&tree.managers, "plasmashell"); // kde user applications
+ managed_list_add(&tree.managers, "xfwm4"); // xfce4 user applications
+#elif defined(OS_WINDOWS)
+ managed_list_add(&tree.managers, "wininit");
+ managed_list_add(&tree.managers, "services");
+ managed_list_add(&tree.managers, "explorer");
+ managed_list_add(&tree.managers, "System");
+#elif defined(OS_FREEBSD)
+ managed_list_add(&tree.managers, "init");
+#elif defined(OS_MACOS)
+ managed_list_add(&tree.managers, "launchd");
+#endif
+
+#if defined(OS_WINDOWS)
+ managed_list_add(&tree.managers, "netdata");
+#else
+ managed_list_add(&tree.managers, "spawn-plugins");
+#endif
+
+ managed_list_clear(&tree.aggregators);
+#if defined(OS_LINUX)
+ managed_list_add(&tree.aggregators, "kthread");
+#elif defined(OS_WINDOWS)
+#elif defined(OS_FREEBSD)
+ managed_list_add(&tree.aggregators, "kernel");
+#elif defined(OS_MACOS)
+#endif
+
+ managed_list_clear(&tree.interpreters);
+ managed_list_add(&tree.interpreters, "python");
+ managed_list_add(&tree.interpreters, "python2");
+ managed_list_add(&tree.interpreters, "python3");
+ managed_list_add(&tree.interpreters, "sh");
+ managed_list_add(&tree.interpreters, "bash");
+ managed_list_add(&tree.interpreters, "node");
+ managed_list_add(&tree.interpreters, "perl");
+}
+
+bool is_process_a_manager(struct pid_stat *p) {
+ for(size_t c = 0; c < tree.managers.used ; c++) {
+ if(pid_match_check(p, &tree.managers.array[c].match))
+ return true;
+ }
+
+ return false;
+}
+
+bool is_process_an_aggregator(struct pid_stat *p) {
+ for(size_t c = 0; c < tree.aggregators.used ; c++) {
+ if(pid_match_check(p, &tree.aggregators.array[c].match))
+ return true;
+ }
+
+ return false;
+}
+
+bool is_process_an_interpreter(struct pid_stat *p) {
+ for(size_t c = 0; c < tree.interpreters.used ; c++) {
+ if(pid_match_check(p, &tree.interpreters.array[c].match))
+ return true;
+ }
+
+ return false;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Tree
+
+struct target *get_tree_target(struct pid_stat *p) {
+// // skip fast all the children that are more than 3 levels down
+// while(p->parent && p->parent->pid != INIT_PID && p->parent->parent && p->parent->parent->parent)
+// p = p->parent;
+
+ // keep the children of INIT_PID, and process orchestrators
+ while(p->parent && p->parent->pid != INIT_PID && p->parent->pid != 0 && !p->parent->is_manager)
+ p = p->parent;
+
+ // merge all processes into process aggregators
+ STRING *search_for = NULL;
+ if((p->ppid == 0 && p->pid != INIT_PID) || (p->parent && p->parent->is_aggregator)) {
+ search_for = string_dup(KernelAggregator);
}
+ else {
+#if (PROCESSES_HAVE_COMM_AND_NAME == 1)
+ search_for = string_dup(p->name ? p->name : p->comm);
+#else
+ search_for = string_dup(p->comm);
+#endif
+ }
+
+ // find an existing target with the required name
+ struct target *w;
+ for(w = apps_groups_root_target; w ; w = w->next) {
+ if (w->name == search_for) {
+ string_freez(search_for);
+ return w;
+ }
+ }
+
+ w = callocz(sizeof(struct target), 1);
+ w->type = TARGET_TYPE_TREE;
+ w->match.starts_with = w->match.ends_with = false;
+ w->match.compare = string_dup(search_for);
+ w->match.pattern = NULL;
+ w->id = search_for;
+ w->name = string_dup(search_for);
+ w->clean_name = get_clean_name(w->name);
+
+ w->next = apps_groups_root_target;
+ apps_groups_root_target = w;
+
+ return w;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Users
+
+#if (PROCESSES_HAVE_UID == 1)
+struct target *users_root_target = NULL;
- strncpyz(w->clean_name, w->name, MAX_NAME);
- netdata_fix_chart_name(w->clean_name);
+struct target *get_uid_target(uid_t uid) {
+ struct target *w;
+ for(w = users_root_target ; w ; w = w->next)
+ if(w->uid == uid) return w;
+ w = callocz(sizeof(struct target), 1);
+ w->type = TARGET_TYPE_UID;
w->uid = uid;
+ w->id = get_numeric_string(uid);
+
+ CACHED_USERNAME cu = cached_username_get_by_uid(uid);
+ w->name = string_dup(cu.username);
+ w->clean_name = get_clean_name(w->name);
+ cached_username_release(cu);
w->next = users_root_target;
users_root_target = w;
- debug_log("added uid %u ('%s') target", w->uid, w->name);
+ debug_log("added uid %u ('%s') target", w->uid, string2str(w->name));
return w;
}
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+// Groups
+
+#if (PROCESSES_HAVE_GID == 1)
+struct target *groups_root_target = NULL;
-struct target *get_groups_target(gid_t gid) {
+struct target *get_gid_target(gid_t gid) {
struct target *w;
for(w = groups_root_target ; w ; w = w->next)
if(w->gid == gid) return w;
w = callocz(sizeof(struct target), 1);
- snprintfz(w->compare, MAX_COMPARE_NAME, "%u", gid);
- w->comparehash = simple_hash(w->compare);
- w->comparelen = strlen(w->compare);
+ w->type = TARGET_TYPE_GID;
+ w->gid = gid;
+ w->id = get_numeric_string(gid);
- snprintfz(w->id, MAX_NAME, "%u", gid);
- w->idhash = simple_hash(w->id);
+ CACHED_GROUPNAME cg = cached_groupname_get_by_gid(gid);
+ w->name = string_dup(cg.groupname);
+ w->clean_name = get_clean_name(w->name);
+ cached_groupname_release(cg);
- struct user_or_group_id group_id_to_find = {
- .id = {
- .gid = gid,
- }
- };
- struct user_or_group_id *group_id = group_id_find(&group_id_to_find);
+ w->next = groups_root_target;
+ groups_root_target = w;
- if(group_id && group_id->name && *group_id->name) {
- snprintfz(w->name, MAX_NAME, "%s", group_id->name);
- }
- else {
- struct group *gr = getgrgid(gid);
- if(!gr || !gr->gr_name || !*gr->gr_name)
- snprintfz(w->name, MAX_NAME, "%u", gid);
- else
- snprintfz(w->name, MAX_NAME, "%s", gr->gr_name);
- }
+    debug_log("added gid %u ('%s') target", w->gid, string2str(w->name));
- strncpyz(w->clean_name, w->name, MAX_NAME);
- netdata_fix_chart_name(w->clean_name);
+ return w;
+}
+#endif
- w->gid = gid;
+// --------------------------------------------------------------------------------------------------------------------
+// SID
- w->next = groups_root_target;
- groups_root_target = w;
+#if (PROCESSES_HAVE_SID == 1)
+struct target *sids_root_target = NULL;
- debug_log("added gid %u ('%s') target", w->gid, w->name);
+struct target *get_sid_target(STRING *sid_name) {
+ struct target *w;
+ for(w = sids_root_target ; w ; w = w->next)
+ if(w->sid_name == sid_name) return w;
+
+ w = callocz(sizeof(struct target), 1);
+ w->type = TARGET_TYPE_SID;
+ w->sid_name = string_dup(sid_name);
+ w->id = string_dup(sid_name);
+ w->name = string_dup(sid_name);
+ w->clean_name = get_clean_name(w->name);
+
+ w->next = sids_root_target;
+ sids_root_target = w;
+
+    debug_log("added sid %s ('%s') target", string2str(w->sid_name), string2str(w->name));
return w;
}
+#endif
+
+// --------------------------------------------------------------------------------------------------------------------
+// apps_groups.conf
+
+struct target *apps_groups_root_target = NULL;
// find or create a new target
// there are targets that are just aggregated to other target (the second argument)
-static struct target *get_apps_groups_target(const char *id, struct target *target, const char *name) {
- int tdebug = 0, thidden = target?target->hidden:0, ends_with = 0;
- const char *nid = id;
-
- // extract the options
- while(nid[0] == '-' || nid[0] == '+' || nid[0] == '*') {
- if(nid[0] == '-') thidden = 1;
- if(nid[0] == '+') tdebug = 1;
- if(nid[0] == '*') ends_with = 1;
- nid++;
- }
- uint32_t hash = simple_hash(id);
+static struct target *get_apps_groups_target(const char *comm, struct target *target, const char *name) {
+ APPS_MATCH match = pid_match_create(comm);
+ STRING *name_lookup = string_strdupz(name);
// find if it already exists
struct target *w, *last = apps_groups_root_target;
for(w = apps_groups_root_target ; w ; w = w->next) {
- if(w->idhash == hash && strncmp(nid, w->id, MAX_NAME) == 0)
+ if(w->id == match.compare) {
+ pid_match_cleanup(&match);
+ string_freez(name_lookup);
return w;
+ }
last = w;
}
// find an existing target
if(unlikely(!target)) {
- while(*name == '-') {
- if(*name == '-') thidden = 1;
- name++;
- }
-
- for(target = apps_groups_root_target ; target != NULL ; target = target->next) {
- if(!target->target && strcmp(name, target->name) == 0)
+ for(target = apps_groups_root_target ; target ; target = target->next) {
+ if(!target->target && name_lookup == target->name)
break;
}
-
- if(unlikely(debug_enabled)) {
- if(unlikely(target))
- debug_log("REUSING TARGET NAME '%s' on ID '%s'", target->name, target->id);
- else
- debug_log("NEW TARGET NAME '%s' on ID '%s'", name, id);
- }
}
if(target && target->target)
- fatal("Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'", id, target->id, target->target->id);
+ fatal("Internal Error: request to link process '%s' to target '%s' which is linked to target '%s'",
+ comm, string2str(target->id), string2str(target->target->id));
w = callocz(sizeof(struct target), 1);
- strncpyz(w->id, nid, MAX_NAME);
- w->idhash = simple_hash(w->id);
+ w->type = TARGET_TYPE_APP_GROUP;
+ w->match = match;
+ w->id = string_dup(w->match.compare);
if(unlikely(!target))
- // copy the name
- strncpyz(w->name, name, MAX_NAME);
+ w->name = string_dup(name_lookup); // copy the name
else
- // copy the id
- strncpyz(w->name, nid, MAX_NAME);
+ w->name = string_dup(w->id); // copy the id
// dots are used to distinguish chart type and id in streaming, so we should replace them
- strncpyz(w->clean_name, w->name, MAX_NAME);
- netdata_fix_chart_name(w->clean_name);
- for (char *d = w->clean_name; *d; d++) {
- if (*d == '.')
- *d = '_';
- }
-
- strncpyz(w->compare, nid, MAX_COMPARE_NAME);
- size_t len = strlen(w->compare);
- if(w->compare[len - 1] == '*') {
- w->compare[len - 1] = '\0';
- w->starts_with = 1;
- }
- w->ends_with = ends_with;
+ w->clean_name = get_clean_name(w->name);
- if(w->starts_with && w->ends_with)
+ if(w->match.starts_with && w->match.ends_with)
proc_pid_cmdline_is_needed = true;
- w->comparehash = simple_hash(w->compare);
- w->comparelen = strlen(w->compare);
-
- w->hidden = thidden;
-#ifdef NETDATA_INTERNAL_CHECKS
- w->debug_enabled = tdebug;
-#else
- if(tdebug)
- fprintf(stderr, "apps.plugin has been compiled without debugging\n");
-#endif
w->target = target;
// append it, to maintain the order in apps_groups.conf
if(last) last->next = w;
else apps_groups_root_target = w;
- debug_log("ADDING TARGET ID '%s', process name '%s' (%s), aggregated on target '%s', options: %s %s"
- , w->id
- , w->compare, (w->starts_with && w->ends_with)?"substring":((w->starts_with)?"prefix":((w->ends_with)?"suffix":"exact"))
- , w->target?w->target->name:w->name
- , (w->hidden)?"hidden":"-"
- , (w->debug_enabled)?"debug":"-"
+ debug_log("ADDING TARGET ID '%s', process name '%s' (%s), aggregated on target '%s'"
+ , string2str(w->id)
+ , string2str(w->match.compare)
+ , (w->match.starts_with && w->match.ends_with) ? "substring" : ((w->match.starts_with) ? "prefix" : ((w->match.ends_with) ? "suffix" : "exact"))
+              , string2str(w->target?w->target->name:w->name)
);
+ string_freez(name_lookup);
+
return w;
}
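
For orientation, get_tree_target() above walks each process up towards its nearest process manager (or INIT_PID) and then groups by the resulting name. A simplified sketch of that walk with a hypothetical example, assuming the struct pid_stat fields used above:

    // simplified sketch of the parent walk in get_tree_target():
    // climb until the parent is INIT_PID, pid 0, or a detected process manager,
    // so every descendant of a manager collapses into a single tree target.
    //
    // hypothetical example with managers = { "systemd", "containerd-shim-runc-v2" }:
    //   systemd -> containerd-shim-runc-v2 -> postgres -> postgres (checkpointer)
    // both postgres processes stop the walk just below the shim and are
    // aggregated into one tree target named "postgres".
    static struct pid_stat *walk_to_nearest_manager(struct pid_stat *p) {
        while(p->parent && p->parent->pid != INIT_PID && p->parent->pid != 0 && !p->parent->is_manager)
            p = p->parent;
        return p;
    }
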
@@ -224,20 +395,49 @@ int read_apps_groups_conf(const char *path, const char *file) {
if(!words) continue;
char *name = procfile_lineword(ff, line, 0);
- if(!name || !*name) continue;
+ if(!name || !*name || *name == '#') continue;
+
+ if(strcmp(name, "managers") == 0) {
+ if(words == 2 && strcmp(procfile_lineword(ff, line, 1), "clear") == 0)
+ managed_list_clear(&tree.managers);
+
+ for(word = 1; word < words ;word++) {
+ char *s = procfile_lineword(ff, line, word);
+ if (!s || !*s) continue;
+ if (*s == '#') break;
+
+ managed_list_add(&tree.managers, s);
+ }
+
+ // done with managers, proceed to next line
+ continue;
+ }
+
+ if(strcmp(name, "interpreters") == 0) {
+ if(words == 2 && strcmp(procfile_lineword(ff, line, 1), "clear") == 0)
+ managed_list_clear(&tree.interpreters);
+
+ for(word = 1; word < words ;word++) {
+ char *s = procfile_lineword(ff, line, word);
+ if (!s || !*s) continue;
+ if (*s == '#') break;
+
+ managed_list_add(&tree.interpreters, s);
+ }
+
+            // done with interpreters, proceed to the next line
+ continue;
+ }
// find a possibly existing target
struct target *w = NULL;
// loop through all words, skipping the first one (the name)
- for(word = 0; word < words ;word++) {
+ for(word = 1; word < words ;word++) {
char *s = procfile_lineword(ff, line, word);
if(!s || !*s) continue;
if(*s == '#') break;
- // is this the first word? skip it
- if(s == name) continue;
-
// add this target
struct target *n = get_apps_groups_target(s, w, name);
if(!n) {
@@ -252,15 +452,5 @@ int read_apps_groups_conf(const char *path, const char *file) {
}
procfile_close(ff);
-
- apps_groups_default_target = get_apps_groups_target("p+!o@w#e$i^r&7*5(-i)l-o_", NULL, "other"); // match nothing
- if(!apps_groups_default_target)
- fatal("Cannot create default target");
- apps_groups_default_target->is_other = true;
-
- // allow the user to override group 'other'
- if(apps_groups_default_target->target)
- apps_groups_default_target = apps_groups_default_target->target;
-
return 0;
}
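
The 'managers' and 'interpreters' directives parsed above extend apps_groups.conf beyond plain group definitions: a line consisting only of the keyword and the word 'clear' resets the corresponding built-in list, and any other words are added as extra match patterns. A hypothetical excerpt (illustration only, not the shipped defaults) could look like:

    managers: clear
    managers: systemd containerd-shim-runc-v2 tini
    interpreters: python3 node

    database: postgres* mysqld
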
diff --git a/src/collectors/apps.plugin/apps_users_and_groups.c b/src/collectors/apps.plugin/apps_users_and_groups.c
deleted file mode 100644
index d28b39e79..000000000
--- a/src/collectors/apps.plugin/apps_users_and_groups.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "apps_plugin.h"
-
-// ----------------------------------------------------------------------------
-// read users and groups from files
-
-enum user_or_group_id_type {
- USER_ID,
- GROUP_ID
-};
-
-struct user_or_group_ids {
- enum user_or_group_id_type type;
-
- avl_tree_type index;
- struct user_or_group_id *root;
-
- char filename[FILENAME_MAX + 1];
-};
-
-int user_id_compare(void* a, void* b) {
- if(((struct user_or_group_id *)a)->id.uid < ((struct user_or_group_id *)b)->id.uid)
- return -1;
-
- else if(((struct user_or_group_id *)a)->id.uid > ((struct user_or_group_id *)b)->id.uid)
- return 1;
-
- else
- return 0;
-}
-
-struct user_or_group_ids all_user_ids = {
- .type = USER_ID,
-
- .index = {
- NULL,
- user_id_compare
- },
-
- .root = NULL,
-
- .filename = "",
-};
-
-int group_id_compare(void* a, void* b) {
- if(((struct user_or_group_id *)a)->id.gid < ((struct user_or_group_id *)b)->id.gid)
- return -1;
-
- else if(((struct user_or_group_id *)a)->id.gid > ((struct user_or_group_id *)b)->id.gid)
- return 1;
-
- else
- return 0;
-}
-
-struct user_or_group_ids all_group_ids = {
- .type = GROUP_ID,
-
- .index = {
- NULL,
- group_id_compare
- },
-
- .root = NULL,
-
- .filename = "",
-};
-
-int file_changed(const struct stat *statbuf __maybe_unused, struct timespec *last_modification_time __maybe_unused) {
-#if defined(__APPLE__)
- return 0;
-#else
- if(likely(statbuf->st_mtim.tv_sec == last_modification_time->tv_sec &&
- statbuf->st_mtim.tv_nsec == last_modification_time->tv_nsec)) return 0;
-
- last_modification_time->tv_sec = statbuf->st_mtim.tv_sec;
- last_modification_time->tv_nsec = statbuf->st_mtim.tv_nsec;
-
- return 1;
-#endif
-}
-
-int read_user_or_group_ids(struct user_or_group_ids *ids, struct timespec *last_modification_time) {
- struct stat statbuf;
- if(unlikely(stat(ids->filename, &statbuf)))
- return 1;
- else
- if(likely(!file_changed(&statbuf, last_modification_time))) return 0;
-
- procfile *ff = procfile_open(ids->filename, " :\t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) return 1;
-
- ff = procfile_readall(ff);
- if(unlikely(!ff)) return 1;
-
- size_t line, lines = procfile_lines(ff);
-
- for(line = 0; line < lines ;line++) {
- size_t words = procfile_linewords(ff, line);
- if(unlikely(words < 3)) continue;
-
- char *name = procfile_lineword(ff, line, 0);
- if(unlikely(!name || !*name)) continue;
-
- char *id_string = procfile_lineword(ff, line, 2);
- if(unlikely(!id_string || !*id_string)) continue;
-
-
- struct user_or_group_id *user_or_group_id = callocz(1, sizeof(struct user_or_group_id));
-
- if(ids->type == USER_ID)
- user_or_group_id->id.uid = (uid_t) str2ull(id_string, NULL);
- else
-            user_or_group_id->id.gid = (gid_t) str2ull(id_string, NULL);
-
- user_or_group_id->name = strdupz(name);
- user_or_group_id->updated = 1;
-
- struct user_or_group_id *existing_user_id = NULL;
-
- if(likely(ids->root))
- existing_user_id = (struct user_or_group_id *)avl_search(&ids->index, (avl_t *) user_or_group_id);
-
- if(unlikely(existing_user_id)) {
- freez(existing_user_id->name);
- existing_user_id->name = user_or_group_id->name;
- existing_user_id->updated = 1;
- freez(user_or_group_id);
- }
- else {
- if(unlikely(avl_insert(&ids->index, (avl_t *) user_or_group_id) != (void *) user_or_group_id)) {
- netdata_log_error("INTERNAL ERROR: duplicate indexing of id during realloc");
- }
-
- user_or_group_id->next = ids->root;
- ids->root = user_or_group_id;
- }
- }
-
- procfile_close(ff);
-
- // remove unused ids
- struct user_or_group_id *user_or_group_id = ids->root, *prev_user_id = NULL;
-
- while(user_or_group_id) {
- if(unlikely(!user_or_group_id->updated)) {
- if(unlikely((struct user_or_group_id *)avl_remove(&ids->index, (avl_t *) user_or_group_id) != user_or_group_id))
- netdata_log_error("INTERNAL ERROR: removal of unused id from index, removed a different id");
-
- if(prev_user_id)
- prev_user_id->next = user_or_group_id->next;
- else
- ids->root = user_or_group_id->next;
-
- freez(user_or_group_id->name);
- freez(user_or_group_id);
-
- if(prev_user_id)
- user_or_group_id = prev_user_id->next;
- else
- user_or_group_id = ids->root;
- }
- else {
- user_or_group_id->updated = 0;
-
- prev_user_id = user_or_group_id;
- user_or_group_id = user_or_group_id->next;
- }
- }
-
- return 0;
-}
-
-struct user_or_group_id *user_id_find(struct user_or_group_id *user_id_to_find) {
- if(*netdata_configured_host_prefix) {
- static struct timespec last_passwd_modification_time;
- int ret = read_user_or_group_ids(&all_user_ids, &last_passwd_modification_time);
-
- if(likely(!ret && all_user_ids.index.root))
- return (struct user_or_group_id *)avl_search(&all_user_ids.index, (avl_t *)user_id_to_find);
- }
-
- return NULL;
-}
-
-struct user_or_group_id *group_id_find(struct user_or_group_id *group_id_to_find) {
- if(*netdata_configured_host_prefix) {
- static struct timespec last_group_modification_time;
- int ret = read_user_or_group_ids(&all_group_ids, &last_group_modification_time);
-
- if(likely(!ret && all_group_ids.index.root))
-            return (struct user_or_group_id *)avl_search(&all_group_ids.index, (avl_t *) group_id_to_find);
- }
-
- return NULL;
-}
-
-void users_and_groups_init(void) {
- snprintfz(all_user_ids.filename, FILENAME_MAX, "%s/etc/passwd", netdata_configured_host_prefix);
- debug_log("passwd file: '%s'", all_user_ids.filename);
-
- snprintfz(all_group_ids.filename, FILENAME_MAX, "%s/etc/group", netdata_configured_host_prefix);
- debug_log("group file: '%s'", all_group_ids.filename);
-}
-
diff --git a/src/collectors/apps.plugin/busy_threads.c b/src/collectors/apps.plugin/busy_threads.c
new file mode 100644
index 000000000..490c66148
--- /dev/null
+++ b/src/collectors/apps.plugin/busy_threads.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+ * A very simple pthreads program to spawn N busy threads.
+ * It is just used for validating apps.plugin CPU utilization
+ * calculations per operating system.
+ *
+ * Compile with:
+ *
+ * gcc -O2 -ggdb -o busy_threads busy_threads.c -pthread
+ *
+ * Run as:
+ *
+ * busy_threads 2
+ *
+ * The above will create 2 busy threads, each using 1 core in user time.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <signal.h>
+#include <unistd.h>
+
+volatile int keep_running = 1;
+
+void handle_signal(int signal) {
+ keep_running = 0;
+}
+
+void *busy_loop(void *arg) {
+ while (keep_running) {
+ // Busy loop to keep CPU at 100%
+ }
+ return NULL;
+}
+
+int main(int argc, char *argv[]) {
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s <number of threads>\n", argv[0]);
+ exit(EXIT_FAILURE);
+ }
+
+ int num_threads = atoi(argv[1]);
+ if (num_threads <= 0) {
+ fprintf(stderr, "Number of threads must be a positive integer.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ // Register the signal handler to gracefully exit on Ctrl-C
+ signal(SIGINT, handle_signal);
+
+ pthread_t *threads = malloc(sizeof(pthread_t) * num_threads);
+ if (threads == NULL) {
+ perror("malloc");
+ exit(EXIT_FAILURE);
+ }
+
+ // Create threads
+ for (int i = 0; i < num_threads; i++) {
+ if (pthread_create(&threads[i], NULL, busy_loop, NULL) != 0) {
+ perror("pthread_create");
+ free(threads);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ // Wait for threads to finish (they never will unless interrupted)
+ for (int i = 0; i < num_threads; i++) {
+ pthread_join(threads[i], NULL);
+ }
+
+ free(threads);
+ return 0;
+}
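
As a usage note for the validation tool above: running it as './busy_threads 2' next to a collecting apps.plugin should show the matching target consuming close to two full cores of user CPU, which is the cross-check this program is meant to provide.
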
diff --git a/src/collectors/cgroups.plugin/README.md b/src/collectors/cgroups.plugin/README.md
index efa868bfb..dc58973af 100644
--- a/src/collectors/cgroups.plugin/README.md
+++ b/src/collectors/cgroups.plugin/README.md
@@ -1,12 +1,3 @@
-<!--
-title: "Monitor Cgroups (cgroups.plugin)"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/cgroups.plugin/README.md"
-sidebar_label: "Monitor Cgroups"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Virtualized environments/Containers"
--->
-
# Monitor Cgroups (cgroups.plugin)
You can monitor containers and virtual machines using **cgroups**.
diff --git a/src/collectors/cgroups.plugin/cgroup-discovery.c b/src/collectors/cgroups.plugin/cgroup-discovery.c
index d880f8a71..5d3027a47 100644
--- a/src/collectors/cgroups.plugin/cgroup-discovery.c
+++ b/src/collectors/cgroups.plugin/cgroup-discovery.c
@@ -23,7 +23,7 @@ struct cgroup *discovered_cgroup_root = NULL;
char cgroup_chart_id_prefix[] = "cgroup_";
char services_chart_id_prefix[] = "systemd_";
-char *cgroups_rename_script = NULL;
+const char *cgroups_rename_script = NULL;
// Shared memory with information from detected cgroups
netdata_ebpf_cgroup_shm_t shm_cgroup_ebpf = {NULL, NULL};
@@ -188,7 +188,7 @@ static inline void discovery_rename_cgroup(struct cgroup *cg) {
}
char buffer[CGROUP_CHARTID_LINE_MAX + 1];
- char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, instance->child_stdout_fp);
+ char *new_name = fgets(buffer, CGROUP_CHARTID_LINE_MAX, spawn_popen_stdout(instance));
int exit_code = spawn_popen_wait(instance);
switch (exit_code) {
@@ -1101,7 +1101,7 @@ static inline void read_cgroup_network_interfaces(struct cgroup *cg) {
char *s;
char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
- while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, instance->child_stdout_fp))) {
+ while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, spawn_popen_stdout(instance)))) {
trim(s);
if(*s && *s != '\n') {
diff --git a/src/collectors/cgroups.plugin/cgroup-internals.h b/src/collectors/cgroups.plugin/cgroup-internals.h
index e0d53dc93..cdb5837bd 100644
--- a/src/collectors/cgroups.plugin/cgroup-internals.h
+++ b/src/collectors/cgroups.plugin/cgroup-internals.h
@@ -273,7 +273,7 @@ struct discovery_thread {
extern struct discovery_thread discovery_thread;
-extern char *cgroups_rename_script;
+extern const char *cgroups_rename_script;
extern char cgroup_chart_id_prefix[];
extern char services_chart_id_prefix[];
extern uv_mutex_t cgroup_root_mutex;
@@ -313,7 +313,7 @@ extern SIMPLE_PATTERN *enabled_cgroup_renames;
extern SIMPLE_PATTERN *systemd_services_cgroups;
extern SIMPLE_PATTERN *entrypoint_parent_process_comm;
-extern char *cgroups_network_interface_script;
+extern const char *cgroups_network_interface_script;
extern int cgroups_check;
@@ -394,8 +394,8 @@ static inline char *cgroup_chart_type(char *buffer, struct cgroup *cg) {
#define RRDFUNCTIONS_CGTOP_HELP "View running containers"
#define RRDFUNCTIONS_SYSTEMD_SERVICES_HELP "View systemd services"
-int cgroup_function_cgroup_top(BUFFER *wb, const char *function);
-int cgroup_function_systemd_top(BUFFER *wb, const char *function);
+int cgroup_function_cgroup_top(BUFFER *wb, const char *function, BUFFER *payload, const char *source);
+int cgroup_function_systemd_top(BUFFER *wb, const char *function, BUFFER *payload, const char *source);
void cgroup_netdev_link_init(void);
const DICTIONARY_ITEM *cgroup_netdev_get(struct cgroup *cg);
diff --git a/src/collectors/cgroups.plugin/cgroup-name.sh.in b/src/collectors/cgroups.plugin/cgroup-name.sh.in
index 0f8b63256..18755b622 100755
--- a/src/collectors/cgroups.plugin/cgroup-name.sh.in
+++ b/src/collectors/cgroups.plugin/cgroup-name.sh.in
@@ -155,7 +155,7 @@ function docker_like_get_name_api() {
info "Running API command: curl \"${host}${path}\""
JSON=$(curl -sS "${host}${path}")
fi
- if OUTPUT=$(echo "${JSON}" | jq -r '.Config.Env[],"CONT_NAME=\(.Name)","IMAGE_NAME=\(.Config.Image)"') && [ -n "$OUTPUT" ]; then
+ if OUTPUT=$(echo "${JSON}" | jq -r '.Config.Env[]?,"CONT_NAME=\(.Name)","IMAGE_NAME=\(.Config.Image)"') && [ -n "$OUTPUT" ]; then
parse_docker_like_inspect_output "$OUTPUT"
fi
return 0
@@ -610,7 +610,7 @@ function podman_validate_id() {
DOCKER_HOST="${DOCKER_HOST:=/var/run/docker.sock}"
PODMAN_HOST="${PODMAN_HOST:=/run/podman/podman.sock}"
CGROUP_PATH="${1}" # the path as it is (e.g. '/docker/efcf4c409')
-CGROUP="${2}" # the modified path (e.g. 'docker_efcf4c409')
+CGROUP="${2//\//_}" # the modified path (e.g. 'docker_efcf4c409')
EXIT_SUCCESS=0
EXIT_RETRY=2
EXIT_DISABLE=3
diff --git a/src/collectors/cgroups.plugin/cgroup-network.c b/src/collectors/cgroups.plugin/cgroup-network.c
index 4cb5cbabe..d64b31288 100644
--- a/src/collectors/cgroups.plugin/cgroup-network.c
+++ b/src/collectors/cgroups.plugin/cgroup-network.c
@@ -3,6 +3,8 @@
#include "libnetdata/libnetdata.h"
#include "libnetdata/required_dummies.h"
+SPAWN_SERVER *spawn_server = NULL;
+
char env_netdata_host_prefix[FILENAME_MAX + 50] = "";
char env_netdata_log_method[FILENAME_MAX + 50] = "";
char env_netdata_log_format[FILENAME_MAX + 50] = "";
@@ -42,7 +44,7 @@ unsigned int read_iface_iflink(const char *prefix, const char *iface) {
unsigned long long iflink = 0;
int ret = read_single_number_file(filename, &iflink);
- if(ret) collector_error("Cannot read '%s'.", filename);
+ if(ret) nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot read '%s'.", filename);
return (unsigned int)iflink;
}
@@ -55,7 +57,7 @@ unsigned int read_iface_ifindex(const char *prefix, const char *iface) {
unsigned long long ifindex = 0;
int ret = read_single_number_file(filename, &ifindex);
- if(ret) collector_error("Cannot read '%s'.", filename);
+ if(ret) nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot read '%s'.", filename);
return (unsigned int)ifindex;
}
@@ -68,19 +70,15 @@ struct iface *read_proc_net_dev(const char *scope __maybe_unused, const char *pr
snprintfz(filename, FILENAME_MAX, "%s%s", prefix, (*prefix)?"/proc/1/net/dev":"/proc/net/dev");
-#ifdef NETDATA_INTERNAL_CHECKS
- collector_info("parsing '%s'", filename);
-#endif
-
ff = procfile_open(filename, " \t,:|", PROCFILE_FLAG_DEFAULT);
if(unlikely(!ff)) {
- collector_error("Cannot open file '%s'", filename);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open file '%s'", filename);
return NULL;
}
ff = procfile_readall(ff);
if(unlikely(!ff)) {
- collector_error("Cannot read file '%s'", filename);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot read file '%s'", filename);
return NULL;
}
@@ -97,9 +95,7 @@ struct iface *read_proc_net_dev(const char *scope __maybe_unused, const char *pr
t->next = root;
root = t;
-#ifdef NETDATA_INTERNAL_CHECKS
- collector_info("added %s interface '%s', ifindex %u, iflink %u", scope, t->device, t->ifindex, t->iflink);
-#endif
+ nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "added %s interface '%s', ifindex %u, iflink %u", scope, t->device, t->ifindex, t->iflink);
}
procfile_close(ff);
@@ -143,13 +139,18 @@ static void continue_as_child(void) {
int status;
pid_t ret;
- if (child < 0)
- collector_error("fork() failed");
+ if (child < 0) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "fork() failed");
+ exit(1);
+ }
- /* Only the child returns */
- if (child == 0)
+ if (child == 0) {
+ // the child returns
+ gettid_uncached();
return;
+ }
+ // here is the parent
for (;;) {
ret = waitpid(child, &status, WUNTRACED);
if ((ret == child) && (WIFSTOPPED(status))) {
@@ -159,9 +160,36 @@ static void continue_as_child(void) {
} else {
break;
}
+ tinysleep();
}
/* Return the child's exit code if possible */
+
+#ifdef __SANITIZE_ADDRESS__
+ /*
+ * With sanitization, exiting leads to an infinite loop (100% cpu) here:
+ *
+ * #0 0x00007ffff690ea8b in sched_yield () from /usr/lib/libc.so.6
+ * #1 0x00007ffff792c4a6 in __sanitizer::StopTheWorld (callback=<optimized out>, argument=<optimized out>) at /usr/src/debug/gcc/gcc/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp:457
+ * #2 0x00007ffff793f6f9 in __lsan::LockStuffAndStopTheWorldCallback (info=<optimized out>, size=<optimized out>, data=0x7fffffffde20) at /usr/src/debug/gcc/gcc/libsanitizer/lsan/lsan_common_linux.cpp:127
+ * #3 0x00007ffff6977909 in dl_iterate_phdr () from /usr/lib/libc.so.6
+ * #4 0x00007ffff793fb24 in __lsan::LockStuffAndStopTheWorld (callback=callback@entry=0x7ffff793d9d0 <__lsan::CheckForLeaksCallback(__sanitizer::SuspendedThreadsList const&, void*)>, argument=argument@entry=0x7fffffffdea0)
+ * at /usr/src/debug/gcc/gcc/libsanitizer/lsan/lsan_common_linux.cpp:142
+ * #5 0x00007ffff793c965 in __lsan::CheckForLeaks () at /usr/src/debug/gcc/gcc/libsanitizer/lsan/lsan_common.cpp:778
+ * #6 0x00007ffff793cc68 in __lsan::DoLeakCheck () at /usr/src/debug/gcc/gcc/libsanitizer/lsan/lsan_common.cpp:821
+ * #7 0x00007ffff684e340 in __cxa_finalize () from /usr/lib/libc.so.6
+ * #8 0x00007ffff7838c58 in __do_global_dtors_aux () from /usr/lib/libasan.so.8
+ * #9 0x00007fffffffdfe0 in ?? ()
+ *
+     * It is probably something related to switching namespaces,
+     * so we kill -9 ourselves.
+ *
+ */
+
+ nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "sanitizers detected, killing myself to avoid lockup");
+ kill(getpid(), SIGKILL);
+#endif
+
if (WIFEXITED(status)) {
exit(WEXITSTATUS(status));
} else if (WIFSIGNALED(status)) {
@@ -179,7 +207,7 @@ int proc_pid_fd(const char *prefix, const char *ns, pid_t pid) {
int fd = open(filename, O_RDONLY | O_CLOEXEC);
if(fd == -1)
- collector_error("Cannot open proc_pid_fd() file '%s'", filename);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open proc_pid_fd() file '%s'", filename);
return fd;
}
@@ -203,10 +231,8 @@ static struct ns {
{ .nstype = 0, .fd = -1, .status = -1, .name = NULL, .path = NULL }
};
-int switch_namespace(const char *prefix, pid_t pid) {
-
+static int switch_namespace(const char *prefix, pid_t pid) {
#ifdef HAVE_SETNS
-
int i;
for(i = 0; all_ns[i].name ; i++)
all_ns[i].fd = proc_pid_fd(prefix, all_ns[i].path, pid);
@@ -229,7 +255,9 @@ int switch_namespace(const char *prefix, pid_t pid) {
if(setns(all_ns[i].fd, all_ns[i].nstype) == -1) {
if(pass == 1) {
all_ns[i].status = 0;
- collector_error("Cannot switch to %s namespace of pid %d", all_ns[i].name, (int) pid);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR,
+ "Cannot switch to %s namespace of pid %d",
+ all_ns[i].name, (int) pid);
}
}
else
@@ -238,21 +266,22 @@ int switch_namespace(const char *prefix, pid_t pid) {
}
}
+ gettid_uncached();
setgroups(0, NULL);
if(root_fd != -1) {
if(fchdir(root_fd) < 0)
- collector_error("Cannot fchdir() to pid %d root directory", (int)pid);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot fchdir() to pid %d root directory", (int)pid);
if(chroot(".") < 0)
- collector_error("Cannot chroot() to pid %d root directory", (int)pid);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot chroot() to pid %d root directory", (int)pid);
close(root_fd);
}
if(cwd_fd != -1) {
if(fchdir(cwd_fd) < 0)
- collector_error("Cannot fchdir() to pid %d current working directory", (int)pid);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot fchdir() to pid %d current working directory", (int)pid);
close(cwd_fd);
}
@@ -276,9 +305,8 @@ int switch_namespace(const char *prefix, pid_t pid) {
#else
errno = ENOSYS;
- collector_error("setns() is missing on this system.");
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "setns() is missing on this system.");
return 1;
-
#endif
}
@@ -286,13 +314,13 @@ pid_t read_pid_from_cgroup_file(const char *filename) {
int fd = open(filename, procfile_open_flags);
if(fd == -1) {
if (errno != ENOENT)
- collector_error("Cannot open pid_from_cgroup() file '%s'.", filename);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot open pid_from_cgroup() file '%s'.", filename);
return 0;
}
FILE *fp = fdopen(fd, "r");
if(!fp) {
- collector_error("Cannot upgrade fd to fp for file '%s'.", filename);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot upgrade fd to fp for file '%s'.", filename);
return 0;
}
@@ -307,9 +335,8 @@ pid_t read_pid_from_cgroup_file(const char *filename) {
fclose(fp);
-#ifdef NETDATA_INTERNAL_CHECKS
- if(pid > 0) collector_info("found pid %d on file '%s'", pid, filename);
-#endif
+ if(pid > 0)
+ nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "found pid %d on file '%s'", pid, filename);
return pid;
}
@@ -331,7 +358,7 @@ pid_t read_pid_from_cgroup(const char *path) {
DIR *dir = opendir(path);
if (!dir) {
- collector_error("cannot read directory '%s'", path);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot read directory '%s'", path);
return 0;
}
@@ -368,9 +395,8 @@ struct found_device {
} *detected_devices = NULL;
void add_device(const char *host, const char *guest) {
-#ifdef NETDATA_INTERNAL_CHECKS
- collector_info("adding device with host '%s', guest '%s'", host, guest);
-#endif
+ errno_clear();
+ nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "adding device with host '%s', guest '%s'", host, guest);
uint32_t hash = simple_hash(host);
@@ -422,36 +448,34 @@ void detect_veth_interfaces(pid_t pid) {
host = read_proc_net_dev("host", netdata_configured_host_prefix);
if(!host) {
errno_clear();
- collector_error("cannot read host interface list.");
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "no host interface list.");
goto cleanup;
}
if(!eligible_ifaces(host)) {
errno_clear();
- collector_info("there are no double-linked host interfaces available.");
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "no double-linked host interfaces available.");
goto cleanup;
}
if(switch_namespace(netdata_configured_host_prefix, pid)) {
errno_clear();
- collector_error("cannot switch to the namespace of pid %u", (unsigned int) pid);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot switch to the namespace of pid %u", (unsigned int) pid);
goto cleanup;
}
-#ifdef NETDATA_INTERNAL_CHECKS
- collector_info("switched to namespaces of pid %d", pid);
-#endif
+ nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "switched to namespaces of pid %d", pid);
cgroup = read_proc_net_dev("cgroup", NULL);
if(!cgroup) {
errno_clear();
- collector_error("cannot read cgroup interface list.");
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot read cgroup interface list.");
goto cleanup;
}
if(!eligible_ifaces(cgroup)) {
errno_clear();
- collector_error("there are not double-linked cgroup interfaces available.");
+        nd_log(NDLS_COLLECTORS, NDLP_ERR, "there are no double-linked cgroup interfaces available.");
goto cleanup;
}
@@ -478,66 +502,113 @@ void detect_veth_interfaces(pid_t pid) {
if(iface_is_eligible(h)) {
for (c = cgroup; c; c = c->next) {
if(iface_is_eligible(c) && h->ifindex == c->iflink && h->iflink == c->ifindex) {
- add_device(h->device, c->device);
+ printf("%s %s\n", h->device, c->device);
+ // add_device(h->device, c->device);
}
}
}
}
+ printf("EXIT DONE\n");
+ fflush(stdout);
+
cleanup:
free_host_ifaces(cgroup);
free_host_ifaces(host);
}
+struct send_to_spawned_process {
+ pid_t pid;
+ char host_prefix[FILENAME_MAX];
+};
+
+
+static int spawn_callback(SPAWN_REQUEST *request) {
+ const struct send_to_spawned_process *d = request->data;
+ detect_veth_interfaces(d->pid);
+ return 0;
+}
+
+#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
+static void read_from_spawned(SPAWN_INSTANCE *si, const char *name __maybe_unused) {
+ char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
+ char *s;
+ FILE *fp = fdopen(spawn_server_instance_read_fd(si), "r");
+ while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, fp))) {
+ trim(s);
+
+ if(*s && *s != '\n') {
+ char *t = s;
+ while(*t && *t != ' ') t++;
+ if(*t == ' ') {
+ *t = '\0';
+ t++;
+ }
+
+ if(strcmp(s, "EXIT") == 0)
+ break;
+
+ if(!*s || !*t) continue;
+ add_device(s, t);
+ }
+ }
+ fclose(fp);
+ spawn_server_instance_read_fd_unset(si);
+ spawn_server_exec_kill(spawn_server, si);
+}
+
+void detect_veth_interfaces_spawn(pid_t pid) {
+ struct send_to_spawned_process d = {
+ .pid = pid,
+ };
+ strncpyz(d.host_prefix, netdata_configured_host_prefix, sizeof(d.host_prefix) - 1);
+ SPAWN_INSTANCE *si = spawn_server_exec(spawn_server, STDERR_FILENO, 0, NULL, &d, sizeof(d), SPAWN_INSTANCE_TYPE_CALLBACK);
+ if(si)
+ read_from_spawned(si, "switch namespace callback");
+ else
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "cgroup-network cannot spawn switch namespace callback");
+}
+
// ----------------------------------------------------------------------------
// call the external helper
#define CGROUP_NETWORK_INTERFACE_MAX_LINE 2048
void call_the_helper(pid_t pid, const char *cgroup) {
- if(setresuid(0, 0, 0) == -1)
- collector_error("setresuid(0, 0, 0) failed.");
-
char command[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
if(cgroup)
snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --cgroup '%s'", cgroup);
else
snprintfz(command, CGROUP_NETWORK_INTERFACE_MAX_LINE, "exec " PLUGINS_DIR "/cgroup-network-helper.sh --pid %d", pid);
- collector_info("running: %s", command);
+ nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "running: %s", command);
- POPEN_INSTANCE *pi;
+ SPAWN_INSTANCE *si;
- if(cgroup)
- pi = spawn_popen_run_variadic(PLUGINS_DIR "/cgroup-network-helper.sh", "--cgroup", cgroup, NULL);
+ if(cgroup) {
+ const char *argv[] = {
+ PLUGINS_DIR "/cgroup-network-helper.sh",
+ "--cgroup",
+ cgroup,
+ NULL,
+ };
+ si = spawn_server_exec(spawn_server, nd_log_collectors_fd(), 0, argv, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC);
+ }
else {
char buffer[100];
snprintfz(buffer, sizeof(buffer) - 1, "%d", pid);
- pi = spawn_popen_run_variadic(PLUGINS_DIR "/cgroup-network-helper.sh", "--pid", buffer, NULL);
+ const char *argv[] = {
+ PLUGINS_DIR "/cgroup-network-helper.sh",
+ "--pid",
+ buffer,
+ NULL,
+ };
+ si = spawn_server_exec(spawn_server, nd_log_collectors_fd(), 0, argv, NULL, 0, SPAWN_INSTANCE_TYPE_EXEC);
}
- if(pi) {
- char buffer[CGROUP_NETWORK_INTERFACE_MAX_LINE + 1];
- char *s;
- while((s = fgets(buffer, CGROUP_NETWORK_INTERFACE_MAX_LINE, pi->child_stdout_fp))) {
- trim(s);
-
- if(*s && *s != '\n') {
- char *t = s;
- while(*t && *t != ' ') t++;
- if(*t == ' ') {
- *t = '\0';
- t++;
- }
-
- if(!*s || !*t) continue;
- add_device(s, t);
- }
- }
-
- spawn_popen_kill(pi);
- }
+ if(si)
+ read_from_spawned(si, command);
else
- collector_error("cannot execute cgroup-network helper script: %s", command);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot execute cgroup-network helper script: %s", command);
}
int is_valid_path_symbol(char c) {
@@ -568,33 +639,33 @@ int verify_path(const char *path) {
const char *s = path;
while((c = *s++)) {
if(!( isalnum(c) || is_valid_path_symbol(c) )) {
- collector_error("invalid character in path '%s'", path);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "invalid character in path '%s'", path);
return -1;
}
}
if(strstr(path, "\\") && !strstr(path, "\\x")) {
- collector_error("invalid escape sequence in path '%s'", path);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "invalid escape sequence in path '%s'", path);
return 1;
}
if(strstr(path, "/../")) {
- collector_error("invalid parent path sequence detected in '%s'", path);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "invalid parent path sequence detected in '%s'", path);
return 1;
}
if(path[0] != '/') {
- collector_error("only absolute path names are supported - invalid path '%s'", path);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "only absolute path names are supported - invalid path '%s'", path);
return -1;
}
if (stat(path, &sb) == -1) {
- collector_error("cannot stat() path '%s'", path);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "cannot stat() path '%s'", path);
return -1;
}
if((sb.st_mode & S_IFMT) != S_IFDIR) {
- collector_error("path '%s' is not a directory", path);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "path '%s' is not a directory", path);
return -1;
}
@@ -616,10 +687,10 @@ char *fix_path_variable(void) {
char *s = strsep(&ptr, ":");
if(s && *s) {
if(verify_path(s) == -1) {
- collector_error("the PATH variable includes an invalid path '%s' - removed it.", s);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "the PATH variable includes an invalid path '%s' - removed it.", s);
}
else {
- collector_info("the PATH variable includes a valid path '%s'.", s);
+ nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "the PATH variable includes a valid path '%s'.", s);
if(added) strcat(safe_path, ":");
strcat(safe_path, s);
added++;
@@ -627,8 +698,8 @@ char *fix_path_variable(void) {
}
}
- collector_info("unsafe PATH: '%s'.", path);
- collector_info(" safe PATH: '%s'.", safe_path);
+ nd_log(NDLS_COLLECTORS, NDLP_DEBUG, "unsafe PATH: '%s'.", path);
+ nd_log(NDLS_COLLECTORS, NDLP_DEBUG, " safe PATH: '%s'.", safe_path);
freez(p);
return safe_path;
@@ -643,11 +714,14 @@ void usage(void) {
exit(1);
}
-int main(int argc, char **argv) {
+int main(int argc, const char **argv) {
pid_t pid = 0;
- clocks_init();
+ if (setresuid(0, 0, 0) == -1)
+ collector_error("setresuid(0, 0, 0) failed.");
+
nd_log_initialize_for_external_plugins("cgroup-network");
+ spawn_server = spawn_server_create(SPAWN_SERVER_OPTION_EXEC | SPAWN_SERVER_OPTION_CALLBACK, NULL, spawn_callback, argc, argv);
// since cgroup-network runs as root, prevent it from opening symbolic links
procfile_open_flags = O_RDONLY|O_NOFOLLOW;
@@ -700,16 +774,16 @@ int main(int argc, char **argv) {
if(pid <= 0) {
errno_clear();
- collector_error("Invalid pid %d given", (int) pid);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Invalid pid %d given", (int) pid);
return 2;
}
if(helper) call_the_helper(pid, NULL);
}
else if(!strcmp(argv[arg], "--cgroup")) {
- char *cgroup = argv[arg+1];
+ const char *cgroup = argv[arg+1];
if(verify_path(cgroup) == -1) {
- collector_error("cgroup '%s' does not exist or is not valid.", cgroup);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "cgroup '%s' does not exist or is not valid.", cgroup);
return 1;
}
@@ -718,16 +792,19 @@ int main(int argc, char **argv) {
if(pid <= 0 && !detected_devices) {
errno_clear();
- collector_error("Cannot find a cgroup PID from cgroup '%s'", cgroup);
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Cannot find a cgroup PID from cgroup '%s'", cgroup);
}
}
else
usage();
if(pid > 0)
- detect_veth_interfaces(pid);
+ detect_veth_interfaces_spawn(pid);
int found = send_devices();
+
+ spawn_server_destroy(spawn_server);
+
if(found <= 0) return 1;
return 0;
}
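The reworked `main()` above boils down to a simple spawn-server lifecycle: keep root with `setresuid()`, create the server once with the exec/callback options, run the detection through it, and destroy it before exit. A minimal sketch of that lifecycle, reusing only the calls visible in this hunk (argument parsing and everything else elided):

    /* sketch only - mirrors the spawn-server calls shown in the hunk above */
    int main(int argc, const char **argv) {
        if (setresuid(0, 0, 0) == -1)
            collector_error("setresuid(0, 0, 0) failed.");

        nd_log_initialize_for_external_plugins("cgroup-network");
        spawn_server = spawn_server_create(SPAWN_SERVER_OPTION_EXEC | SPAWN_SERVER_OPTION_CALLBACK,
                                           NULL, spawn_callback, argc, argv);

        /* ... parse --pid/--cgroup and call detect_veth_interfaces_spawn(pid) ... */

        int found = send_devices();
        spawn_server_destroy(spawn_server);
        return found <= 0 ? 1 : 0;
    }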
diff --git a/src/collectors/cgroups.plugin/cgroup-top.c b/src/collectors/cgroups.plugin/cgroup-top.c
index aa413dad1..7b98502b5 100644
--- a/src/collectors/cgroups.plugin/cgroup-top.c
+++ b/src/collectors/cgroups.plugin/cgroup-top.c
@@ -98,7 +98,7 @@ void cgroup_netdev_get_bandwidth(struct cgroup *cg, NETDATA_DOUBLE *received, NE
*sent = t->sent[slot];
}
-int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused) {
+int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
@@ -341,7 +341,7 @@ int cgroup_function_cgroup_top(BUFFER *wb, const char *function __maybe_unused)
return HTTP_RESP_OK;
}
-int cgroup_function_systemd_top(BUFFER *wb, const char *function __maybe_unused) {
+int cgroup_function_systemd_top(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
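Both callbacks above pick up the extra `payload` and `source` parameters required by the updated inline-function API. A hedged sketch of a callback with the new signature and its registration; `example_top_function` is a hypothetical name, the registration call mirrors the `rrd_function_add_inline()` invocations shown later in `cgroups_main()`, and the JSON body is elided:

    /* sketch only - new callback signature as used in this patch */
    int example_top_function(BUFFER *wb, const char *function __maybe_unused,
                             BUFFER *payload __maybe_unused, const char *source __maybe_unused) {
        buffer_flush(wb);
        wb->content_type = CT_APPLICATION_JSON;
        buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
        /* ... add the rows and columns of the response ... */
        return HTTP_RESP_OK;
    }

    /* registration, mirroring cgroups_main():
       rrd_function_add_inline(localhost, NULL, "containers-vms", 10,
                               RRDFUNCTIONS_PRIORITY_DEFAULT / 2, RRDFUNCTIONS_VERSION_DEFAULT,
                               RRDFUNCTIONS_CGTOP_HELP, "top", HTTP_ACCESS_ANONYMOUS_DATA,
                               example_top_function); */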
diff --git a/src/collectors/cgroups.plugin/sys_fs_cgroup.c b/src/collectors/cgroups.plugin/sys_fs_cgroup.c
index 5fdefa863..d41575fa6 100644
--- a/src/collectors/cgroups.plugin/sys_fs_cgroup.c
+++ b/src/collectors/cgroups.plugin/sys_fs_cgroup.c
@@ -39,7 +39,7 @@ SIMPLE_PATTERN *search_cgroup_paths = NULL;
SIMPLE_PATTERN *enabled_cgroup_renames = NULL;
SIMPLE_PATTERN *systemd_services_cgroups = NULL;
SIMPLE_PATTERN *entrypoint_parent_process_comm = NULL;
-char *cgroups_network_interface_script = NULL;
+const char *cgroups_network_interface_script = NULL;
int cgroups_check = 0;
uint32_t Read_hash = 0;
uint32_t Write_hash = 0;
@@ -82,7 +82,7 @@ static enum cgroups_systemd_setting cgroups_detect_systemd(const char *exec)
return retval;
struct pollfd pfd;
- pfd.fd = spawn_server_instance_read_fd(pi->si);
+ pfd.fd = spawn_popen_read_fd(pi);
pfd.events = POLLIN;
int timeout = 3000; // milliseconds
@@ -93,7 +93,7 @@ static enum cgroups_systemd_setting cgroups_detect_systemd(const char *exec)
} else if (ret == 0) {
collector_info("Cannot get the output of \"%s\" within timeout (%d ms)", exec, timeout);
} else {
- while (fgets(buf, MAXSIZE_PROC_CMDLINE, pi->child_stdout_fp) != NULL) {
+ while (fgets(buf, MAXSIZE_PROC_CMDLINE, spawn_popen_stdout(pi)) != NULL) {
if ((begin = strstr(buf, SYSTEMD_HIERARCHY_STRING))) {
end = begin = begin + strlen(SYSTEMD_HIERARCHY_STRING);
if (!*begin)
@@ -153,18 +153,18 @@ static enum cgroups_type cgroups_try_detect_version()
int cgroups2_available = 0;
// 1. check if cgroups2 available on system at all
- POPEN_INSTANCE *instance = spawn_popen_run("grep cgroup /proc/filesystems");
- if(!instance) {
+ POPEN_INSTANCE *pi = spawn_popen_run("grep cgroup /proc/filesystems");
+ if(!pi) {
collector_error("cannot run 'grep cgroup /proc/filesystems'");
return CGROUPS_AUTODETECT_FAIL;
}
- while (fgets(buf, MAXSIZE_PROC_CMDLINE, instance->child_stdout_fp) != NULL) {
+ while (fgets(buf, MAXSIZE_PROC_CMDLINE, spawn_popen_stdout(pi)) != NULL) {
if (strstr(buf, "cgroup2")) {
cgroups2_available = 1;
break;
}
}
- if(spawn_popen_wait(instance) != 0)
+ if(spawn_popen_wait(pi) != 0)
return CGROUPS_AUTODETECT_FAIL;
if(!cgroups2_available)
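The hunk above stops reaching into `POPEN_INSTANCE` internals and goes through the `spawn_popen_*` accessors instead. The run / read / wait lifecycle, reduced to a sketch that uses only the helpers named in this hunk:

    POPEN_INSTANCE *pi = spawn_popen_run("grep cgroup /proc/filesystems");
    if (!pi)
        return CGROUPS_AUTODETECT_FAIL;

    char buf[MAXSIZE_PROC_CMDLINE];
    while (fgets(buf, MAXSIZE_PROC_CMDLINE, spawn_popen_stdout(pi)) != NULL) {
        /* inspect each line of the child's stdout */
    }

    if (spawn_popen_wait(pi) != 0)  /* reaps the child; non-zero is treated as failure above */
        return CGROUPS_AUTODETECT_FAIL;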
@@ -229,13 +229,17 @@ void read_cgroup_plugin_configuration() {
throttled_time_hash = simple_hash("throttled_time");
throttled_usec_hash = simple_hash("throttled_usec");
- cgroup_update_every = (int)config_get_number("plugin:cgroups", "update every", localhost->rrd_update_every);
- if(cgroup_update_every < localhost->rrd_update_every)
+ cgroup_update_every = (int)config_get_duration_seconds("plugin:cgroups", "update every", localhost->rrd_update_every);
+ if(cgroup_update_every < localhost->rrd_update_every) {
cgroup_update_every = localhost->rrd_update_every;
+ config_set_duration_seconds("plugin:cgroups", "update every", localhost->rrd_update_every);
+ }
- cgroup_check_for_new_every = (int)config_get_number("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every);
- if(cgroup_check_for_new_every < cgroup_update_every)
+ cgroup_check_for_new_every = (int)config_get_duration_seconds("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every);
+ if(cgroup_check_for_new_every < cgroup_update_every) {
cgroup_check_for_new_every = cgroup_update_every;
+ config_set_duration_seconds("plugin:cgroups", "check for new cgroups every", cgroup_check_for_new_every);
+ }
cgroup_use_unified_cgroups = config_get_boolean_ondemand("plugin:cgroups", "use unified cgroups", CONFIG_BOOLEAN_AUTO);
if (cgroup_use_unified_cgroups == CONFIG_BOOLEAN_AUTO)
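The configuration change above switches both options to duration-typed reads and, when a value has to be clamped, writes the effective value back so the stored setting matches what the plugin actually uses. The read / clamp / write-back pattern in isolation, assuming the helpers exactly as named in this hunk (`section`, `name` and `minimum` are placeholders):

    /* read a duration option, clamp it to a minimum, persist the effective value */
    int secs = (int)config_get_duration_seconds(section, name, minimum);
    if (secs < minimum) {
        secs = minimum;
        /* write back so the config file reflects what is really used */
        config_set_duration_seconds(section, name, secs);
    }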
@@ -1401,24 +1405,25 @@ void *cgroups_main(void *ptr) {
cgroup_netdev_link_init();
rrd_function_add_inline(localhost, NULL, "containers-vms", 10,
- RRDFUNCTIONS_PRIORITY_DEFAULT / 2, RRDFUNCTIONS_CGTOP_HELP,
+ RRDFUNCTIONS_PRIORITY_DEFAULT / 2, RRDFUNCTIONS_VERSION_DEFAULT,
+ RRDFUNCTIONS_CGTOP_HELP,
"top", HTTP_ACCESS_ANONYMOUS_DATA,
cgroup_function_cgroup_top);
rrd_function_add_inline(localhost, NULL, "systemd-services", 10,
- RRDFUNCTIONS_PRIORITY_DEFAULT / 3, RRDFUNCTIONS_SYSTEMD_SERVICES_HELP,
+ RRDFUNCTIONS_PRIORITY_DEFAULT / 3, RRDFUNCTIONS_VERSION_DEFAULT,
+ RRDFUNCTIONS_SYSTEMD_SERVICES_HELP,
"top", HTTP_ACCESS_ANONYMOUS_DATA,
cgroup_function_systemd_top);
heartbeat_t hb;
- heartbeat_init(&hb);
- usec_t step = cgroup_update_every * USEC_PER_SEC;
+ heartbeat_init(&hb, cgroup_update_every * USEC_PER_SEC);
usec_t find_every = cgroup_check_for_new_every * USEC_PER_SEC, find_dt = 0;
while(service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t hb_dt = heartbeat_next(&hb);
if (unlikely(!service_running(SERVICE_COLLECTORS)))
break;
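With the heartbeat change above, the tick period is fixed once at `heartbeat_init()` and `heartbeat_next()` no longer takes a step argument. The collection loop reduces to this shape (sketch, identifiers as in the hunk):

    heartbeat_t hb;
    heartbeat_init(&hb, cgroup_update_every * USEC_PER_SEC);

    while (service_running(SERVICE_COLLECTORS)) {
        worker_is_idle();
        usec_t hb_dt = heartbeat_next(&hb);  /* sleeps until the next tick */
        if (unlikely(!service_running(SERVICE_COLLECTORS)))
            break;
        /* ... discover new cgroups every find_every and read metrics ... */
    }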
diff --git a/src/collectors/charts.d.plugin/README.md b/src/collectors/charts.d.plugin/README.md
index 3558985db..309f60e63 100644
--- a/src/collectors/charts.d.plugin/README.md
+++ b/src/collectors/charts.d.plugin/README.md
@@ -7,8 +7,7 @@
3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
4. Supports any number of data collection **modules**
-To better understand the guidelines and the API behind our External plugins, please have a look at the [Introduction to External plugins](/src/collectors/plugins.d/README.md) prior to reading this page.
-
+To better understand the guidelines and the API behind our External plugins, please have a look at the [Introduction to External plugins](/src/plugins.d/README.md) prior to reading this page.
`charts.d.plugin` has been designed so that the actual script that will do data collection will be permanently in
memory, collecting data with as little overheads as possible
@@ -21,11 +20,11 @@ By default, `charts.d.plugin` is not included as part of the install when using
## Configuration
-`charts.d.plugin` itself can be [configured](/docs/netdata-agent/configuration/README.md#edit-netdataconf)using the configuration file `/etc/netdata/charts.d.conf`. This file is also a BASH script.
+`charts.d.plugin` itself can be [configured](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) using the configuration file `/etc/netdata/charts.d.conf`. This file is also a BASH script.
In this file, you can place statements like this:
-```conf
+```text
enable_all_charts="yes"
X="yes"
Y="no"
@@ -121,7 +120,7 @@ Using the above, if the command `mysql` is not available in the system, the `mys
`fixid()` will get a string and return a properly formatted id for a chart or dimension.
This is an expensive function that should not be used in `X_update()`.
-You can keep the generated id in a BASH associative array to have the values availables in `X_update()`, like this:
+You can keep the generated id in a BASH associative array to have the values available in `X_update()`, like this:
```sh
declare -A X_ids=()
diff --git a/src/collectors/charts.d.plugin/apcupsd/README.md b/src/collectors/charts.d.plugin/apcupsd/README.md
deleted file mode 120000
index fc6681fe6..000000000
--- a/src/collectors/charts.d.plugin/apcupsd/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/apc_ups.md \ No newline at end of file
diff --git a/src/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh b/src/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
deleted file mode 100644
index 58132024b..000000000
--- a/src/collectors/charts.d.plugin/apcupsd/apcupsd.chart.sh
+++ /dev/null
@@ -1,306 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-apcupsd_ip=
-apcupsd_port=
-
-declare -A apcupsd_sources=(
- ["local"]="127.0.0.1:3551"
-)
-
-# how frequently to collect UPS data
-apcupsd_update_every=10
-
-apcupsd_timeout=3
-
-# the priority of apcupsd related to other charts
-apcupsd_priority=90000
-
-apcupsd_get() {
- run -t $apcupsd_timeout apcaccess status "$1"
-}
-
-is_ups_alive() {
- local status
- status="$(apcupsd_get "$1" | sed -e 's/STATUS.*: //' -e 't' -e 'd')"
- case "$status" in
- "" | "COMMLOST" | "SHUTTING DOWN") return 1 ;;
- *) return 0 ;;
- esac
-}
-
-apcupsd_check() {
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- require_cmd apcaccess || return 1
-
- # backwards compatibility
- if [ "${apcupsd_ip}:${apcupsd_port}" != ":" ]; then
- apcupsd_sources["local"]="${apcupsd_ip}:${apcupsd_port}"
- fi
-
- local host working=0 failed=0
- for host in "${!apcupsd_sources[@]}"; do
- apcupsd_get "${apcupsd_sources[${host}]}" >/dev/null
- # shellcheck disable=2181
- if [ $? -ne 0 ]; then
- error "cannot get information for apcupsd server ${host} on ${apcupsd_sources[${host}]}."
- failed=$((failed + 1))
- else
- if ! is_ups_alive ${apcupsd_sources[${host}]}; then
- error "APC UPS ${host} on ${apcupsd_sources[${host}]} is not online."
- failed=$((failed + 1))
- else
- working=$((working + 1))
- fi
- fi
- done
-
- if [ ${working} -eq 0 ]; then
- error "No APC UPSes found available."
- return 1
- fi
-
- return 0
-}
-
-apcupsd_create() {
- local host
- for host in "${!apcupsd_sources[@]}"; do
- # create the charts
- cat <<EOF
-CHART apcupsd_${host}.charge '' "UPS Charge" "percentage" ups apcupsd.charge area $((apcupsd_priority + 2)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION battery_charge charge absolute 1 100
-
-CHART apcupsd_${host}.battery_voltage '' "UPS Battery Voltage" "Volts" ups apcupsd.battery.voltage line $((apcupsd_priority + 4)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION battery_voltage voltage absolute 1 100
-DIMENSION battery_voltage_nominal nominal absolute 1 100
-
-CHART apcupsd_${host}.input_voltage '' "UPS Input Voltage" "Volts" input apcupsd.input.voltage line $((apcupsd_priority + 5)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION input_voltage voltage absolute 1 100
-DIMENSION input_voltage_min min absolute 1 100
-DIMENSION input_voltage_max max absolute 1 100
-
-CHART apcupsd_${host}.input_frequency '' "UPS Input Frequency" "Hz" input apcupsd.input.frequency line $((apcupsd_priority + 6)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION input_frequency frequency absolute 1 100
-
-CHART apcupsd_${host}.output_voltage '' "UPS Output Voltage" "Volts" output apcupsd.output.voltage line $((apcupsd_priority + 7)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION output_voltage voltage absolute 1 100
-DIMENSION output_voltage_nominal nominal absolute 1 100
-
-CHART apcupsd_${host}.load '' "UPS Load" "percentage" ups apcupsd.load area $((apcupsd_priority)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION load load absolute 1 100
-
-CHART apcupsd_${host}.load_usage '' "UPS Load Usage" "Watts" ups apcupsd.load_usage area $((apcupsd_priority + 1)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION load_usage load absolute 1 100
-
-CHART apcupsd_${host}.temp '' "UPS Temperature" "Celsius" ups apcupsd.temperature line $((apcupsd_priority + 8)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION temp temp absolute 1 100
-
-CHART apcupsd_${host}.time '' "UPS Time Remaining" "Minutes" ups apcupsd.time area $((apcupsd_priority + 3)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION time time absolute 1 100
-
-CHART apcupsd_${host}.online '' "UPS ONLINE flag" "boolean" ups apcupsd.online line $((apcupsd_priority + 9)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION online online absolute 1 1
-
-CHART apcupsd_${host}.selftest '' "UPS Self-Test status" "status" ups apcupsd.selftest line $((apcupsd_priority + 10)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION selftest_OK 'OK' absolute 1 1
-DIMENSION selftest_NO 'NO' absolute 1 1
-DIMENSION selftest_BT 'BT' absolute 1 1
-DIMENSION selftest_NG 'NG' absolute 1 1
-
-CHART apcupsd_${host}.status '' "UPS Status" "status" ups apcupsd.status line $((apcupsd_priority + 11)) $apcupsd_update_every '' '' 'apcupsd'
-DIMENSION status_ONLINE 'ONLINE' absolute 1 1
-DIMENSION status_ONBATT 'ONBATT' absolute 1 1
-DIMENSION status_OVERLOAD 'OVERLOAD' absolute 1 1
-DIMENSION status_LOWBATT 'LOWBATT' absolute 1 1
-DIMENSION status_REPLACEBATT 'REPLACEBATT' absolute 1 1
-DIMENSION status_NOBATT 'NOBATT' absolute 1 1
-DIMENSION status_SLAVE 'SLAVE' absolute 1 1
-DIMENSION status_SLAVEDOWN 'SLAVEDOWN' absolute 1 1
-DIMENSION status_COMMLOST 'COMMLOST' absolute 1 1
-DIMENSION status_CAL 'CAL' absolute 1 1
-DIMENSION status_TRIM 'TRIM' absolute 1 1
-DIMENSION status_BOOST 'BOOST' absolute 1 1
-DIMENSION status_SHUTTING_DOWN 'SHUTTING_DOWN' absolute 1 1
-
-EOF
- done
- return 0
-}
-
-apcupsd_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- local host working=0 failed=0
- for host in "${!apcupsd_sources[@]}"; do
- apcupsd_get "${apcupsd_sources[${host}]}" | awk "
-
-BEGIN {
- battery_charge = 0;
- battery_voltage = 0;
- battery_voltage_nominal = 0;
- input_voltage = 0;
- input_voltage_min = 0;
- input_voltage_max = 0;
- input_frequency = 0;
- output_voltage = 0;
- output_voltage_nominal = 0;
- load = 0;
- temp = 0;
- time = 0;
- online = 0;
- nompower = 0;
- load_usage = 0;
- selftest_OK = 0;
- selftest_NO = 0;
- selftest_BT = 0;
- selftest_NG = 0;
- status_ONLINE = 0;
- status_CAL = 0;
- status_TRIM = 0;
- status_BOOST = 0;
- status_ONBATT = 0;
- status_OVERLOAD = 0;
- status_LOWBATT = 0;
- status_REPLACEBATT = 0;
- status_NOBATT = 0;
- status_SLAVE = 0;
- status_SLAVEDOWN = 0;
- status_COMMLOST = 0;
- status_SHUTTING_DOWN = 0;
-
-}
-/^BCHARGE.*/ { battery_charge = \$3 * 100 };
-/^BATTV.*/ { battery_voltage = \$3 * 100 };
-/^NOMBATTV.*/ { battery_voltage_nominal = \$3 * 100 };
-/^LINEV.*/ { input_voltage = \$3 * 100 };
-/^MINLINEV.*/ { input_voltage_min = \$3 * 100 };
-/^MAXLINEV.*/ { input_voltage_max = \$3 * 100 };
-/^LINEFREQ.*/ { input_frequency = \$3 * 100 };
-/^OUTPUTV.*/ { output_voltage = \$3 * 100 };
-/^NOMOUTV.*/ { output_voltage_nominal = \$3 * 100 };
-/^LOADPCT.*/ { load = \$3 * 100 };
-/^ITEMP.*/ { temp = \$3 * 100 };
-/^NOMPOWER.*/ { nompower = \$3 };
-/^TIMELEFT.*/ { time = \$3 * 100 };
-/^STATUS.*/ { online=(\$0 !~ \"COMMLOST\" && \$0 !~ \"SHUTTING\") ? 1 : 0; };
-/^SELFTEST.*/ { selftest_OK = (\$3 == \"OK\") ? 1 : 0;
- selftest_NO = (\$3 == \"NO\") ? 1 : 0;
- selftest_BT = (\$3 == \"BT\") ? 1 : 0;
- selftest_NG = (\$3 == \"NG\") ? 1 : 0;
- };
-/^STATUS.*/ { status_ONLINE = (\$0 ~ \"ONLINE\") ? 1 : 0;
- status_CAL = (\$0 ~ \"CAL\") ? 1 : 0;
- status_TRIM = (\$0 ~ \"TRIM\") ? 1 : 0;
- status_BOOST = (\$0 ~ \"BOOST\") ? 1 : 0;
- status_ONBATT = (\$0 ~ \"ONBATT\") ? 1 : 0;
- status_OVERLOAD = (\$0 ~ \"OVERLOAD\") ? 1 : 0;
- status_LOWBATT = (\$0 ~ \"LOWBATT\") ? 1 : 0;
- status_REPLACEBATT = (\$0 ~ \"REPLACEBATT\") ? 1 : 0;
- status_NOBATT = (\$0 ~ \"NOBATT\") ? 1 : 0;
- status_SLAVE = (\$0 ~ \"SLAVE( |$)\") ? 1 : 0;
- status_SLAVEDOWN = (\$0 ~ \"SLAVEDOWN\") ? 1 : 0;
- status_COMMLOST = (\$0 ~ \"COMMLOST\") ? 1 : 0;
- status_SHUTTING_DOWN = (\$0 ~ \"SHUTTING\") ? 1 : 0;
- };
-
-END {
- { load_usage = nompower * load / 100 };
-
- print \"BEGIN apcupsd_${host}.online $1\";
- print \"SET online = \" online;
- print \"END\"
-
- if (online == 1) {
- print \"BEGIN apcupsd_${host}.charge $1\";
- print \"SET battery_charge = \" battery_charge;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.battery_voltage $1\";
- print \"SET battery_voltage = \" battery_voltage;
- print \"SET battery_voltage_nominal = \" battery_voltage_nominal;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.input_voltage $1\";
- print \"SET input_voltage = \" input_voltage;
- print \"SET input_voltage_min = \" input_voltage_min;
- print \"SET input_voltage_max = \" input_voltage_max;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.input_frequency $1\";
- print \"SET input_frequency = \" input_frequency;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.output_voltage $1\";
- print \"SET output_voltage = \" output_voltage;
- print \"SET output_voltage_nominal = \" output_voltage_nominal;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.load $1\";
- print \"SET load = \" load;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.load_usage $1\";
- print \"SET load_usage = \" load_usage;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.temp $1\";
- print \"SET temp = \" temp;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.time $1\";
- print \"SET time = \" time;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.selftest $1\";
- print \"SET selftest_OK = \" selftest_OK;
- print \"SET selftest_NO = \" selftest_NO;
- print \"SET selftest_BT = \" selftest_BT;
- print \"SET selftest_NG = \" selftest_NG;
- print \"END\"
-
- print \"BEGIN apcupsd_${host}.status $1\";
- print \"SET status_ONLINE = \" status_ONLINE;
- print \"SET status_ONBATT = \" status_ONBATT;
- print \"SET status_OVERLOAD = \" status_OVERLOAD;
- print \"SET status_LOWBATT = \" status_LOWBATT;
- print \"SET status_REPLACEBATT = \" status_REPLACEBATT;
- print \"SET status_NOBATT = \" status_NOBATT;
- print \"SET status_SLAVE = \" status_SLAVE;
- print \"SET status_SLAVEDOWN = \" status_SLAVEDOWN;
- print \"SET status_COMMLOST = \" status_COMMLOST;
- print \"SET status_CAL = \" status_CAL;
- print \"SET status_TRIM = \" status_TRIM;
- print \"SET status_BOOST = \" status_BOOST;
- print \"SET status_SHUTTING_DOWN = \" status_SHUTTING_DOWN;
- print \"END\";
- }
-}"
- # shellcheck disable=SC2181
- if [ $? -ne 0 ]; then
- failed=$((failed + 1))
- error "failed to get values for APC UPS ${host} on ${apcupsd_sources[${host}]}" && return 1
- else
- working=$((working + 1))
- fi
- done
-
- [ $working -eq 0 ] && error "failed to get values from all APC UPSes" && return 1
-
- return 0
-}
diff --git a/src/collectors/charts.d.plugin/apcupsd/apcupsd.conf b/src/collectors/charts.d.plugin/apcupsd/apcupsd.conf
deleted file mode 100644
index 679c0d61b..000000000
--- a/src/collectors/charts.d.plugin/apcupsd/apcupsd.conf
+++ /dev/null
@@ -1,25 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# add all your APC UPSes in this array - uncomment it too
-#declare -A apcupsd_sources=(
-# ["local"]="127.0.0.1:3551"
-#)
-
-# how long to wait for apcupsd to respond
-#apcupsd_timeout=3
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#apcupsd_update_every=10
-
-# the charts priority on the dashboard
-#apcupsd_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#apcupsd_retries=10
diff --git a/src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md b/src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md
deleted file mode 100644
index fdf1ccc9e..000000000
--- a/src/collectors/charts.d.plugin/apcupsd/integrations/apc_ups.md
+++ /dev/null
@@ -1,237 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/charts.d.plugin/apcupsd/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/charts.d.plugin/apcupsd/metadata.yaml"
-sidebar_label: "APC UPS"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/UPS"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# APC UPS
-
-
-<img src="https://netdata.cloud/img/apc.svg" width="150"/>
-
-
-Plugin: charts.d.plugin
-Module: apcupsd
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. Enhance your power supply reliability with real-time APC UPS metrics.
-
-The collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics.
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 with using the `apcaccess` utility.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per ups
-
-Metrics related to UPS. Each UPS provides its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| apcupsd.charge | charge | percentage |
-| apcupsd.battery.voltage | voltage, nominal | Volts |
-| apcupsd.input.voltage | voltage, min, max | Volts |
-| apcupsd.output.voltage | absolute, nominal | Volts |
-| apcupsd.input.frequency | frequency | Hz |
-| apcupsd.load | load | percentage |
-| apcupsd.load_usage | load | Watts |
-| apcupsd.temperature | temp | Celsius |
-| apcupsd.time | time | Minutes |
-| apcupsd.online | online | boolean |
-| apcupsd.selftest | OK, NO, BT, NG | status |
-| apcupsd.status | ONLINE, ONBATT, OVERLOAD, LOWBATT, REPLACEBATT, NOBATT, SLAVE, SLAVEDOWN, COMMLOST, CAL, TRIM, BOOST, SHUTTING_DOWN | status |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ apcupsd_ups_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.charge | average UPS charge over the last minute |
-| [ apcupsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | average UPS load over the last 10 minutes |
-| [ apcupsd_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.load | number of seconds since the last successful data collection |
-| [ apcupsd_selftest_warning ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.selftest | self-test failed due to insufficient battery capacity or due to overload. |
-| [ apcupsd_status_onbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has switched to battery power because the input power has failed |
-| [ apcupsd_status_overload ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS is overloaded and cannot supply enough power to the load |
-| [ apcupsd_status_lowbatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery is low and needs to be recharged |
-| [ apcupsd_status_replacebatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS battery has reached the end of its lifespan and needs to be replaced |
-| [ apcupsd_status_nobatt ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS has no battery |
-| [ apcupsd_status_commlost ](https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf) | apcupsd.status | APC UPS communication link is lost |
-
-
-## Setup
-
-### Prerequisites
-
-#### Install charts.d plugin
-
-If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
-
-
-#### Required software
-
-Make sure the `apcaccess` and `apcupsd` are installed and running.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `charts.d/apcupsd.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config charts.d/apcupsd.conf
-```
-#### Options
-
-The config file is sourced by the charts.d plugin. It's a standard bash file.
-
-The following collapsed table contains all the options that can be configured for the apcupsd collector.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| apcupsd_sources | This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it. | 127.0.0.1:3551 | no |
-| apcupsd_timeout | How long to wait for apcupsd to respond. | 3 | no |
-| apcupsd_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |
-| apcupsd_priority | The charts priority on the dashboard. | 90000 | no |
-| apcupsd_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
-
-</details>
-
-#### Examples
-
-##### Multiple apcupsd sources
-
-Specify a multiple apcupsd sources along with a custom update interval
-
-```yaml
-# add all your APC UPSes in this array - uncomment it too
-declare -A apcupsd_sources=(
- ["local"]="127.0.0.1:3551",
- ["remote"]="1.2.3.4:3551"
-)
-
-# how long to wait for apcupsd to respond
-#apcupsd_timeout=3
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-apcupsd_update_every=5
-
-# the charts priority on the dashboard
-#apcupsd_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#apcupsd_retries=10
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `apcupsd` collector, run the `charts.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `charts.d.plugin` to debug the collector:
-
- ```bash
- ./charts.d.plugin debug 1 apcupsd
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `apcupsd` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep apcupsd
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
-
-```bash
-grep apcupsd /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep apcupsd
-```
-
-
diff --git a/src/collectors/charts.d.plugin/apcupsd/metadata.yaml b/src/collectors/charts.d.plugin/apcupsd/metadata.yaml
deleted file mode 100644
index 01d86e795..000000000
--- a/src/collectors/charts.d.plugin/apcupsd/metadata.yaml
+++ /dev/null
@@ -1,256 +0,0 @@
-plugin_name: charts.d.plugin
-modules:
- - meta:
- plugin_name: charts.d.plugin
- module_name: apcupsd
- monitored_instance:
- name: APC UPS
- link: "https://www.apc.com"
- categories:
- - data-collection.ups
- icon_filename: "apc.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - ups
- - apc
- - power
- - supply
- - battery
- - apcupsd
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor APC UPS performance with Netdata for optimal uninterruptible power supply operations. Enhance your power supply reliability with real-time APC UPS metrics."
- method_description: "The collector uses the `apcaccess` tool to contact the `apcupsd` daemon and get the APC UPS statistics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "By default, with no configuration provided, the collector will try to contact 127.0.0.1:3551 with using the `apcaccess` utility."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Install charts.d plugin"
- description: |
- If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
- - title: "Required software"
- description: "Make sure the `apcaccess` and `apcupsd` are installed and running."
- configuration:
- file:
- name: charts.d/apcupsd.conf
- options:
- description: |
- The config file is sourced by the charts.d plugin. It's a standard bash file.
-
- The following collapsed table contains all the options that can be configured for the apcupsd collector.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: apcupsd_sources
- description: This is an array of apcupsd sources. You can have multiple entries there. Please refer to the example below on how to set it.
- default_value: "127.0.0.1:3551"
- required: false
- - name: apcupsd_timeout
- description: How long to wait for apcupsd to respond.
- default_value: 3
- required: false
- - name: apcupsd_update_every
- description: The data collection frequency. If unset, will inherit the netdata update frequency.
- default_value: 1
- required: false
- - name: apcupsd_priority
- description: The charts priority on the dashboard.
- default_value: 90000
- required: false
- - name: apcupsd_retries
- description: The number of retries to do in case of failure before disabling the collector.
- default_value: 10
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Multiple apcupsd sources
- description: Specify a multiple apcupsd sources along with a custom update interval
- config: |
- # add all your APC UPSes in this array - uncomment it too
- declare -A apcupsd_sources=(
- ["local"]="127.0.0.1:3551",
- ["remote"]="1.2.3.4:3551"
- )
-
- # how long to wait for apcupsd to respond
- #apcupsd_timeout=3
-
- # the data collection frequency
- # if unset, will inherit the netdata update frequency
- apcupsd_update_every=5
-
- # the charts priority on the dashboard
- #apcupsd_priority=90000
-
- # the number of retries to do in case of failure
- # before disabling the module
- #apcupsd_retries=10
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: apcupsd_ups_charge
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.charge
- info: average UPS charge over the last minute
- os: "*"
- - name: apcupsd_10min_ups_load
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.load
- info: average UPS load over the last 10 minutes
- os: "*"
- - name: apcupsd_last_collected_secs
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.load
- info: number of seconds since the last successful data collection
- - name: apcupsd_selftest_warning
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.selftest
- info: self-test failed due to insufficient battery capacity or due to overload.
- - name: apcupsd_status_onbatt
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS has switched to battery power because the input power has failed
- - name: apcupsd_status_overload
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS is overloaded and cannot supply enough power to the load
- - name: apcupsd_status_lowbatt
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS battery is low and needs to be recharged
- - name: apcupsd_status_replacebatt
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS battery has reached the end of its lifespan and needs to be replaced
- - name: apcupsd_status_nobatt
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS has no battery
- - name: apcupsd_status_commlost
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/apcupsd.conf
- metric: apcupsd.status
- info: APC UPS communication link is lost
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: ups
- description: "Metrics related to UPS. Each UPS provides its own set of the following metrics."
- labels: []
- metrics:
- - name: apcupsd.charge
- description: UPS Charge
- unit: "percentage"
- chart_type: area
- dimensions:
- - name: charge
- - name: apcupsd.battery.voltage
- description: UPS Battery Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: voltage
- - name: nominal
- - name: apcupsd.input.voltage
- description: UPS Input Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: voltage
- - name: min
- - name: max
- - name: apcupsd.output.voltage
- description: UPS Output Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: absolute
- - name: nominal
- - name: apcupsd.input.frequency
- description: UPS Input Voltage
- unit: "Hz"
- chart_type: line
- dimensions:
- - name: frequency
- - name: apcupsd.load
- description: UPS Load
- unit: "percentage"
- chart_type: area
- dimensions:
- - name: load
- - name: apcupsd.load_usage
- description: UPS Load Usage
- unit: "Watts"
- chart_type: area
- dimensions:
- - name: load
- - name: apcupsd.temperature
- description: UPS Temperature
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: temp
- - name: apcupsd.time
- description: UPS Time Remaining
- unit: "Minutes"
- chart_type: area
- dimensions:
- - name: time
- - name: apcupsd.online
- description: UPS ONLINE flag
- unit: "boolean"
- chart_type: line
- dimensions:
- - name: online
- - name: apcupsd.selftest
- description: UPS Self-Test status
- unit: status
- chart_type: line
- dimensions:
- - name: OK
- - name: NO
- - name: BT
- - name: NG
- - name: apcupsd.status
- description: UPS Status
- unit: status
- chart_type: line
- dimensions:
- - name: ONLINE
- - name: ONBATT
- - name: OVERLOAD
- - name: LOWBATT
- - name: REPLACEBATT
- - name: NOBATT
- - name: SLAVE
- - name: SLAVEDOWN
- - name: COMMLOST
- - name: CAL
- - name: TRIM
- - name: BOOST
- - name: SHUTTING_DOWN
diff --git a/src/collectors/charts.d.plugin/example/README.md b/src/collectors/charts.d.plugin/example/README.md
index a16180581..d676cea77 100644
--- a/src/collectors/charts.d.plugin/example/README.md
+++ b/src/collectors/charts.d.plugin/example/README.md
@@ -1,12 +1,3 @@
-<!--
-title: "Example"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/charts.d.plugin/example/README.md"
-sidebar_label: "example-charts.d.plugin"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Mock Collectors"
--->
-
# Example
If you want to understand how charts.d data collector functions, check out the [charts.d example](https://raw.githubusercontent.com/netdata/netdata/master/src/collectors/charts.d.plugin/example/example.chart.sh).
diff --git a/src/collectors/charts.d.plugin/libreswan/integrations/libreswan.md b/src/collectors/charts.d.plugin/libreswan/integrations/libreswan.md
index fa8eb7a97..96691443b 100644
--- a/src/collectors/charts.d.plugin/libreswan/integrations/libreswan.md
+++ b/src/collectors/charts.d.plugin/libreswan/integrations/libreswan.md
@@ -79,7 +79,7 @@ There are no alerts configured by default for this integration.
#### Install charts.d plugin
-If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
+If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
#### Permissions to execute `ipsec`
@@ -115,8 +115,8 @@ Make sure the path `/sbin/ipsec` matches your setup (execute `which ipsec` to fi
The configuration file name for this integration is `charts.d/libreswan.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/charts.d.plugin/opensips/integrations/opensips.md b/src/collectors/charts.d.plugin/opensips/integrations/opensips.md
index 7fa610eb4..13b573627 100644
--- a/src/collectors/charts.d.plugin/opensips/integrations/opensips.md
+++ b/src/collectors/charts.d.plugin/opensips/integrations/opensips.md
@@ -96,7 +96,7 @@ There are no alerts configured by default for this integration.
#### Install charts.d plugin
-If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
+If [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
#### Required software
@@ -111,8 +111,8 @@ The collector requires the `opensipsctl` to be installed.
The configuration file name for this integration is `charts.d/opensips.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/charts.d.plugin/sensors/README.md b/src/collectors/charts.d.plugin/sensors/README.md
deleted file mode 120000
index 7e5a416c4..000000000
--- a/src/collectors/charts.d.plugin/sensors/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/linux_sensors_sysfs.md \ No newline at end of file
diff --git a/src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md b/src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md
deleted file mode 100644
index f9221caa1..000000000
--- a/src/collectors/charts.d.plugin/sensors/integrations/linux_sensors_sysfs.md
+++ /dev/null
@@ -1,235 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/charts.d.plugin/sensors/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/charts.d.plugin/sensors/metadata.yaml"
-sidebar_label: "Linux Sensors (sysfs)"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Linux Sensors (sysfs)
-
-
-<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
-
-
-Plugin: charts.d.plugin
-Module: sensors
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Use this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures).
-For all other cases use the [Go collector](/src/go/plugin/go.d/modules/sensors/README.md), which supports multiple jobs, is more efficient and performs calculations on top of the kernel provided values."
-
-
-It will provide charts for all configured system sensors, by reading sensors directly from the kernel.
-The values graphed are the raw hardware values of the sensors.
-
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, the collector will try to read entries under `/sys/devices`
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per sensor chip
-
-Metrics related to sensor chips. Each chip provides its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| sensors.temp | {filename} | Celsius |
-| sensors.volt | {filename} | Volts |
-| sensors.curr | {filename} | Ampere |
-| sensors.power | {filename} | Watt |
-| sensors.fans | {filename} | Rotations / Minute |
-| sensors.energy | {filename} | Joule |
-| sensors.humidity | {filename} | Percent |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Install charts.d plugin
-
-If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
-
-
-#### Enable the sensors collector
-
-The `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config charts.d.conf
-```
-
-Change the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `charts.d/sensors.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config charts.d/sensors.conf
-```
-#### Options
-
-The config file is sourced by the charts.d plugin. It's a standard bash file.
-
-The following collapsed table contains all the options that can be configured for the sensors collector.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| sensors_sys_dir | The directory the kernel exposes sensor data. | /sys/devices | no |
-| sensors_sys_depth | How deep in the tree to check for sensor data. | 10 | no |
-| sensors_source_update | If set to 1, the script will overwrite internal script functions with code generated ones. | 1 | no |
-| sensors_update_every | The data collection frequency. If unset, will inherit the netdata update frequency. | 1 | no |
-| sensors_priority | The charts priority on the dashboard. | 90000 | no |
-| sensors_retries | The number of retries to do in case of failure before disabling the collector. | 10 | no |
-
-</details>
-
-#### Examples
-
-##### Set sensors path depth
-
-Set a different sensors path depth
-
-```yaml
-# the directory the kernel keeps sensor data
-#sensors_sys_dir="/sys/devices"
-
-# how deep in the tree to check for sensor data
-sensors_sys_depth=5
-
-# if set to 1, the script will overwrite internal
-# script functions with code generated ones
-# leave to 1, is faster
-#sensors_source_update=1
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#sensors_update_every=
-
-# the charts priority on the dashboard
-#sensors_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#sensors_retries=10
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `sensors` collector, run the `charts.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `charts.d.plugin` to debug the collector:
-
- ```bash
- ./charts.d.plugin debug 1 sensors
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep sensors
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
-
-```bash
-grep sensors /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep sensors
-```
-
-
diff --git a/src/collectors/charts.d.plugin/sensors/metadata.yaml b/src/collectors/charts.d.plugin/sensors/metadata.yaml
deleted file mode 100644
index 9aacdd353..000000000
--- a/src/collectors/charts.d.plugin/sensors/metadata.yaml
+++ /dev/null
@@ -1,182 +0,0 @@
-plugin_name: charts.d.plugin
-modules:
- - meta:
- plugin_name: charts.d.plugin
- module_name: sensors
- monitored_instance:
- name: Linux Sensors (sysfs)
- link: "https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface"
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: "microchip.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - sensors
- - sysfs
- - hwmon
- - rpi
- - raspberry pi
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Use this collector when `lm-sensors` doesn't work on your device (e.g. for RPi temperatures).
- For all other cases use the [Go collector](/src/go/plugin/go.d/modules/sensors/README.md), which supports multiple jobs, is more efficient and performs calculations on top of the kernel provided values."
- method_description: |
- It will provide charts for all configured system sensors, by reading sensors directly from the kernel.
- The values graphed are the raw hardware values of the sensors.
- supported_platforms:
- include: [Linux]
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "By default, the collector will try to read entries under `/sys/devices`"
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Install charts.d plugin"
- description: |
- If [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure `netdata-plugin-chartsd` is installed.
- - title: "Enable the sensors collector"
- description: |
- The `sensors` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `charts.d.conf` file.
-
- ```bash
- cd /etc/netdata # Replace this path with your Netdata config directory, if different
- sudo ./edit-config charts.d.conf
- ```
-
- Change the value of the `sensors` setting to `force` and uncomment the line. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
- configuration:
- file:
- name: charts.d/sensors.conf
- options:
- description: |
- The config file is sourced by the charts.d plugin. It's a standard bash file.
-
- The following collapsed table contains all the options that can be configured for the sensors collector.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: sensors_sys_dir
- description: The directory the kernel exposes sensor data.
- default_value: "/sys/devices"
- required: false
- - name: sensors_sys_depth
- description: How deep in the tree to check for sensor data.
- default_value: 10
- required: false
- - name: sensors_source_update
- description: If set to 1, the script will overwrite internal script functions with code generated ones.
- default_value: 1
- required: false
- - name: sensors_update_every
- description: The data collection frequency. If unset, will inherit the netdata update frequency.
- default_value: 1
- required: false
- - name: sensors_priority
- description: The charts priority on the dashboard.
- default_value: 90000
- required: false
- - name: sensors_retries
- description: The number of retries to do in case of failure before disabling the collector.
- default_value: 10
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Set sensors path depth
- description: Set a different sensors path depth
- config: |
- # the directory the kernel keeps sensor data
- #sensors_sys_dir="/sys/devices"
-
- # how deep in the tree to check for sensor data
- sensors_sys_depth=5
-
- # if set to 1, the script will overwrite internal
- # script functions with code generated ones
- # leave to 1, is faster
- #sensors_source_update=1
-
- # the data collection frequency
- # if unset, will inherit the netdata update frequency
- #sensors_update_every=
-
- # the charts priority on the dashboard
- #sensors_priority=90000
-
- # the number of retries to do in case of failure
- # before disabling the module
- #sensors_retries=10
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: sensor chip
- description: "Metrics related to sensor chips. Each chip provides its own set of the following metrics."
- labels: []
- metrics:
- - name: sensors.temp
- description: Temperature
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.volt
- description: Voltage
- unit: "Volts"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.curr
- description: Current
- unit: "Ampere"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.power
- description: Power
- unit: "Watt"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.fans
- description: Fans Speed
- unit: "Rotations / Minute"
- chart_type: line
- dimensions:
- - name: "{filename}"
- - name: sensors.energy
- description: Energy
- unit: "Joule"
- chart_type: area
- dimensions:
- - name: "{filename}"
- - name: sensors.humidity
- description: Humidity
- unit: "Percent"
- chart_type: line
- dimensions:
- - name: "{filename}"
diff --git a/src/collectors/charts.d.plugin/sensors/sensors.chart.sh b/src/collectors/charts.d.plugin/sensors/sensors.chart.sh
deleted file mode 100644
index 9576e2ab2..000000000
--- a/src/collectors/charts.d.plugin/sensors/sensors.chart.sh
+++ /dev/null
@@ -1,250 +0,0 @@
-# shellcheck shell=bash
-# no need for shebang - this file is loaded from charts.d.plugin
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
-#
-
-# sensors docs
-# https://www.kernel.org/doc/Documentation/hwmon/sysfs-interface
-
-# if this chart is called X.chart.sh, then all functions and global variables
-# must start with X_
-
-# the directory the kernel keeps sensor data
-sensors_sys_dir="${NETDATA_HOST_PREFIX}/sys/devices"
-
-# how deep in the tree to check for sensor data
-sensors_sys_depth=10
-
-# if set to 1, the script will overwrite internal
-# script functions with code generated ones
-# leave to 1, is faster
-sensors_source_update=1
-
-# how frequently to collect sensor data
-# the default is to collect it at every iteration of charts.d
-sensors_update_every=
-
-sensors_priority=90000
-
-declare -A sensors_excluded=()
-
-sensors_find_all_files() {
- find "$1" -maxdepth $sensors_sys_depth -name \*_input -o -name temp 2>/dev/null
-}
-
-sensors_find_all_dirs() {
- # shellcheck disable=SC2162
- sensors_find_all_files "$1" | while read; do
- dirname "$REPLY"
- done | sort -u
-}
-
-# _check is called once, to find out if this chart should be enabled or not
-sensors_check() {
-
- # this should return:
- # - 0 to enable the chart
- # - 1 to disable the chart
-
- [ -z "$(sensors_find_all_files "$sensors_sys_dir")" ] && error "no sensors found in '$sensors_sys_dir'." && return 1
- return 0
-}
-
-sensors_check_files() {
- # we only need sensors that report a non-zero value
- # also remove not needed sensors
-
- local f v excluded
- for f in "$@"; do
- [ ! -f "$f" ] && continue
- for ex in "${sensors_excluded[@]}"; do
- [[ $f =~ .*$ex$ ]] && excluded='1' && break
- done
-
- [ "$excluded" != "1" ] && v="$(cat "$f")" || v=0
- v=$((v + 1 - 1))
- [ $v -ne 0 ] && echo "$f" && continue
- excluded=
-
- error "$f gives zero values"
- done
-}
-
-sensors_check_temp_type() {
- # valid temp types are 1 to 6
- # disabled sensors have the value 0
-
- local f t v
- for f in "$@"; do
- # shellcheck disable=SC2001
- t=$(echo "$f" | sed "s|_input$|_type|g")
- [ "$f" = "$t" ] && echo "$f" && continue
- [ ! -f "$t" ] && echo "$f" && continue
-
- v="$(cat "$t")"
- v=$((v + 1 - 1))
- [ $v -ne 0 ] && echo "$f" && continue
-
- error "$f is disabled"
- done
-}
-
-# _create is called once, to create the charts
-sensors_create() {
- local path dir name x file lfile labelname device subsystem id type mode files multiplier divisor
-
- # we create a script with the source of the
- # sensors_update() function
- # - the highest speed we can achieve -
- [ $sensors_source_update -eq 1 ] && echo >"$TMP_DIR/sensors.sh" "sensors_update() {"
-
- for path in $(sensors_find_all_dirs "$sensors_sys_dir" | sort -u); do
- dir=$(basename "$path")
- device=
- subsystem=
- id=
- type=
- name=
-
- [ -h "$path/device" ] && device=$(readlink -f "$path/device")
- [ ! -z "$device" ] && device=$(basename "$device")
- [ -z "$device" ] && device="$dir"
-
- [ -h "$path/subsystem" ] && subsystem=$(readlink -f "$path/subsystem")
- [ ! -z "$subsystem" ] && subsystem=$(basename "$subsystem")
- [ -z "$subsystem" ] && subsystem="$dir"
-
- [ -f "$path/name" ] && name=$(cat "$path/name")
- [ -z "$name" ] && name="$dir"
-
- [ -f "$path/type" ] && type=$(cat "$path/type")
- [ -z "$type" ] && type="$dir"
-
- id="$(fixid "$device.$subsystem.$dir")"
-
- debug "path='$path', dir='$dir', device='$device', subsystem='$subsystem', id='$id', name='$name'"
-
- for mode in temperature voltage fans power current energy humidity; do
- files=
- multiplier=1
- divisor=1
- algorithm="absolute"
-
- case $mode in
- temperature)
- files="$(
- ls "$path"/temp*_input 2>/dev/null
- ls "$path/temp" 2>/dev/null
- )"
- files="$(sensors_check_files "$files")"
- files="$(sensors_check_temp_type "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.temp_${id}_${name}' '' 'Temperature' 'Celsius' 'temperature' 'sensors.temp' line $((sensors_priority + 1)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.temp_${id}_${name}' \$1\""
- divisor=1000
- ;;
-
- voltage)
- files="$(ls "$path"/in*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.volt_${id}_${name}' '' 'Voltage' 'Volts' 'voltage' 'sensors.volt' line $((sensors_priority + 2)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.volt_${id}_${name}' \$1\""
- divisor=1000
- ;;
-
- current)
- files="$(ls "$path"/curr*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.curr_${id}_${name}' '' 'Current' 'Ampere' 'current' 'sensors.curr' line $((sensors_priority + 3)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.curr_${id}_${name}' \$1\""
- divisor=1000
- ;;
-
- power)
- files="$(ls "$path"/power*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.power_${id}_${name}' '' 'Power' 'Watt' 'power' 'sensors.power' line $((sensors_priority + 4)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.power_${id}_${name}' \$1\""
- divisor=1000000
- ;;
-
- fans)
- files="$(ls "$path"/fan*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.fan_${id}_${name}' '' 'Fans Speed' 'Rotations / Minute' 'fans' 'sensors.fans' line $((sensors_priority + 5)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.fan_${id}_${name}' \$1\""
- ;;
-
- energy)
- files="$(ls "$path"/energy*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.energy_${id}_${name}' '' 'Energy' 'Joule' 'energy' 'sensors.energy' area $((sensors_priority + 6)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.energy_${id}_${name}' \$1\""
- algorithm="incremental"
- divisor=1000000
- ;;
-
- humidity)
- files="$(ls "$path"/humidity*_input 2>/dev/null)"
- files="$(sensors_check_files "$files")"
- [ -z "$files" ] && continue
- echo "CHART 'sensors.humidity_${id}_${name}' '' 'Humidity' 'Percent' 'humidity' 'sensors.humidity' line $((sensors_priority + 7)) $sensors_update_every '' '' 'sensors'"
- echo >>"$TMP_DIR/sensors.sh" "echo \"BEGIN 'sensors.humidity_${id}_${name}' \$1\""
- divisor=1000
- ;;
-
- *)
- continue
- ;;
- esac
-
- for x in $files; do
- file="$x"
- fid="$(fixid "$file")"
- lfile="$(basename "$file" | sed "s|_input$|_label|g")"
- labelname="$(basename "$file" | sed "s|_input$||g")"
-
- if [ ! "$path/$lfile" = "$file" ] && [ -f "$path/$lfile" ]; then
- labelname="$(cat "$path/$lfile")"
- fi
-
- echo "DIMENSION $fid '$labelname' $algorithm $multiplier $divisor"
- echo >>"$TMP_DIR/sensors.sh" "echo \"SET $fid = \"\$(< $file )"
- done
-
- echo >>"$TMP_DIR/sensors.sh" "echo END"
- done
- done
-
- [ $sensors_source_update -eq 1 ] && echo >>"$TMP_DIR/sensors.sh" "}"
-
- # ok, load the function sensors_update() we created
- # shellcheck source=/dev/null
- [ $sensors_source_update -eq 1 ] && . "$TMP_DIR/sensors.sh"
-
- return 0
-}
-
-# _update is called continuously, to collect the values
-sensors_update() {
- # the first argument to this function is the microseconds since last update
- # pass this parameter to the BEGIN statement (see below).
-
- # do all the work to collect / calculate the values
- # for each dimension
- # remember: KEEP IT SIMPLE AND SHORT
-
- # shellcheck source=/dev/null
- [ $sensors_source_update -eq 0 ] && . "$TMP_DIR/sensors.sh" "$1"
-
- return 0
-}
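The deleted script above is a good illustration of how charts.d modules talk to the agent: charts and dimensions are declared once (in `sensors_create()`), and every collection cycle prints a `BEGIN`/`SET`/`END` block to stdout. The same text protocol is what the C plugins later in this diff emit with `fprintf()`. A minimal, hypothetical sketch of that protocol in C follows; the chart id `example.random` and dimension `value` are placeholders, not taken from this repository:

```c
// Minimal sketch of Netdata's external plugin text protocol.
// The chart id "example.random" and dimension "value" are hypothetical.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void) {
    int update_every = 1; // seconds between iterations

    // declared once, like sensors_create() does per detected chip
    printf("CHART example.random '' 'Random Number' 'number' 'random' '' line 90000 %d\n",
           update_every);
    printf("DIMENSION value '' absolute 1 1\n");
    fflush(stdout);

    for (;;) {
        // emitted every iteration, like the generated sensors_update()
        printf("BEGIN example.random\n");
        printf("SET value = %d\n", rand() % 100);
        printf("END\n");
        fflush(stdout);
        sleep(update_every);
    }
    return 0;
}
```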
diff --git a/src/collectors/charts.d.plugin/sensors/sensors.conf b/src/collectors/charts.d.plugin/sensors/sensors.conf
deleted file mode 100644
index bcb28807d..000000000
--- a/src/collectors/charts.d.plugin/sensors/sensors.conf
+++ /dev/null
@@ -1,32 +0,0 @@
-# no need for shebang - this file is loaded from charts.d.plugin
-
-# netdata
-# real-time performance and health monitoring, done right!
-# (C) 2018 Costa Tsaousis <costa@tsaousis.gr>
-# GPL v3+
-
-# THIS PLUGIN IS DEPRECATED
-# USE THE PYTHON.D ONE
-
-# the directory the kernel keeps sensor data
-#sensors_sys_dir="/sys/devices"
-
-# how deep in the tree to check for sensor data
-#sensors_sys_depth=10
-
-# if set to 1, the script will overwrite internal
-# script functions with code generated ones
-# leave to 1, is faster
-#sensors_source_update=1
-
-# the data collection frequency
-# if unset, will inherit the netdata update frequency
-#sensors_update_every=
-
-# the charts priority on the dashboard
-#sensors_priority=90000
-
-# the number of retries to do in case of failure
-# before disabling the module
-#sensors_retries=10
-
diff --git a/src/collectors/checks.plugin/README.md b/src/collectors/checks.plugin/README.md
deleted file mode 100644
index 806b1e66c..000000000
--- a/src/collectors/checks.plugin/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-<!--
-title: "checks.plugin"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/checks.plugin/README.md"
-sidebar_label: "checks.plugin"
-learn_status: "Unpublished"
--->
-
-# checks.plugin
-
-A debugging plugin (by default it is disabled)
-
-
diff --git a/src/collectors/common-contexts/common-contexts.h b/src/collectors/common-contexts/common-contexts.h
index 1938230dc..4c7e58e7f 100644
--- a/src/collectors/common-contexts/common-contexts.h
+++ b/src/collectors/common-contexts/common-contexts.h
@@ -18,14 +18,22 @@
typedef void (*instance_labels_cb_t)(RRDSET *st, void *data);
-#include "system.io.h"
-#include "system.ram.h"
-#include "system.interrupts.h"
-#include "system.processes.h"
-#include "system.ipc.h"
-#include "mem.swap.h"
-#include "mem.pgfaults.h"
-#include "mem.available.h"
-#include "disk.io.h"
+#include "system-io.h"
+#include "system-ram.h"
+#include "system-interrupts.h"
+#include "system-processes.h"
+#include "system-ipc.h"
+#include "mem-swap.h"
+#include "mem-pgfaults.h"
+#include "mem-available.h"
+#include "disk-io.h"
+#include "disk-ops.h"
+#include "disk-qops.h"
+#include "disk-util.h"
+#include "disk-busy.h"
+#include "disk-iotime.h"
+#include "disk-await.h"
+#include "disk-svctm.h"
+#include "disk-avgsz.h"
#endif //NETDATA_COMMON_CONTEXTS_H
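Beyond the dot-to-dash rename, all of these headers share one pattern: a small per-instance struct, a `common_*()` inline that lazily creates the chart, and the `instance_labels_cb_t` callback (declared just above the includes) invoked exactly once at chart creation so the caller can attach instance labels. As a rough sketch only, a hypothetical callback could look like the following; the `rrdlabels_add()` call, the `RRDLABEL_SRC_AUTO` source, and the label names are assumptions for illustration, not verified against this tree:

```c
// Hypothetical instance-labels callback for the common_*() helpers below.
// Assumes the usual agent headers (libnetdata/rrd) are already included.
struct my_disk_ctx {
    const char *device;
    const char *serial;
};

static void my_disk_labels_cb(RRDSET *st, void *data) {
    struct my_disk_ctx *ctx = (struct my_disk_ctx *)data;
    // assumed API: add/update labels on the chart's label set
    rrdlabels_add(st->rrdlabels, "device", ctx->device, RRDLABEL_SRC_AUTO);
    rrdlabels_add(st->rrdlabels, "serial", ctx->serial, RRDLABEL_SRC_AUTO);
}
```

Each `common_disk_*()` helper added below calls this back with the freshly created `RRDSET` and the caller's `data` pointer, so labels are applied once per chart instance.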
diff --git a/src/collectors/common-contexts/disk-avgsz.h b/src/collectors/common-contexts/disk-avgsz.h
new file mode 100644
index 000000000..16cca247a
--- /dev/null
+++ b/src/collectors/common-contexts/disk-avgsz.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DISK_AVGSZ_H
+#define NETDATA_DISK_AVGSZ_H
+
+#include "common-contexts.h"
+
+typedef struct {
+ RRDSET *st_avgsz;
+ RRDDIM *rd_avgsz_reads;
+ RRDDIM *rd_avgsz_writes;
+} ND_DISK_AVGSZ;
+
+static inline void common_disk_avgsz(ND_DISK_AVGSZ *d, const char *id, const char *name, uint64_t avg_bytes_read, uint64_t avg_bytes_write, int update_every, instance_labels_cb_t cb, void *data) {
+ if(unlikely(!d->st_avgsz)) {
+ d->st_avgsz = rrdset_create_localhost(
+ "disk_avgsz"
+ , id
+ , name
+ , "io"
+ , "disk.avgsz"
+ , "Average Completed I/O Operation Bandwidth"
+ , "KiB/operation"
+ , _COMMON_PLUGIN_NAME
+ , _COMMON_PLUGIN_MODULE_NAME
+ , NETDATA_CHART_PRIO_DISK_AVGSZ
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_avgsz_reads = rrddim_add(d->st_avgsz, "reads", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_avgsz_writes = rrddim_add(d->st_avgsz, "writes", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE);
+
+ if(cb)
+ cb(d->st_avgsz, data);
+ }
+
+    // this always has to be in base units, so that exporting sends base units to other time-series db
+ rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_reads, (collected_number)avg_bytes_read);
+ rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_writes, (collected_number)avg_bytes_write);
+ rrdset_done(d->st_avgsz);
+}
+
+#endif //NETDATA_DISK_AVGSZ_H
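A sketch of how a collector might drive this helper: keep one zero-initialized `ND_DISK_AVGSZ` per disk and pass the average bytes per completed read and write for the interval. The per-disk struct and the way the averages are derived are illustrative assumptions; only the `common_disk_avgsz()` call itself comes from the header above.

```c
// Illustrative caller of common_disk_avgsz(); names are hypothetical.
typedef struct {
    ND_DISK_AVGSZ avgsz;   // zero-initialized, so st_avgsz is NULL until first use
} MY_DISK;

static void my_disk_report_avgsz(MY_DISK *d, const char *id, const char *name,
                                 uint64_t read_bytes, uint64_t reads,
                                 uint64_t write_bytes, uint64_t writes,
                                 int update_every) {
    uint64_t avg_read  = reads  ? read_bytes  / reads  : 0;
    uint64_t avg_write = writes ? write_bytes / writes : 0;

    // the divisor of 1024 inside the helper renders bytes/operation as KiB/operation,
    // and the -1 multiplier on "writes" mirrors them below the zero line
    common_disk_avgsz(&d->avgsz, id, name, avg_read, avg_write, update_every, NULL, NULL);
}
```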
diff --git a/src/collectors/common-contexts/disk-await.h b/src/collectors/common-contexts/disk-await.h
new file mode 100644
index 000000000..b4142569e
--- /dev/null
+++ b/src/collectors/common-contexts/disk-await.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DISK_AWAIT_H
+#define NETDATA_DISK_AWAIT_H
+
+#include "common-contexts.h"
+
+typedef struct {
+ RRDSET *st_await;
+ RRDDIM *rd_await_reads;
+ RRDDIM *rd_await_writes;
+} ND_DISK_AWAIT;
+
+static inline void common_disk_await(ND_DISK_AWAIT *d, const char *id, const char *name, double read_avg_ms, double write_avg_ms, int update_every, instance_labels_cb_t cb, void *data) {
+ if(unlikely(!d->st_await)) {
+ d->st_await = rrdset_create_localhost(
+ "disk_await"
+ , id
+ , name
+ , "latency"
+ , "disk.await"
+ , "Average Completed I/O Operation Time"
+ , "milliseconds/operation"
+ , _COMMON_PLUGIN_NAME
+ , _COMMON_PLUGIN_MODULE_NAME
+ , NETDATA_CHART_PRIO_DISK_AWAIT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ d->rd_await_reads = rrddim_add(d->st_await, "reads", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_await_writes = rrddim_add(d->st_await, "writes", NULL, -1, 1000, RRD_ALGORITHM_ABSOLUTE);
+
+ if(cb)
+ cb(d->st_await, data);
+ }
+
+    // this always has to be in base units, so that exporting sends base units to other time-series db
+ rrddim_set_by_pointer(d->st_await, d->rd_await_reads, (collected_number)(read_avg_ms * 1000.0));
+ rrddim_set_by_pointer(d->st_await, d->rd_await_writes, (collected_number)(write_avg_ms * 1000.0));
+ rrdset_done(d->st_await);
+}
+
+#endif //NETDATA_DISK_AWAIT_H
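Note the fixed-point convention here: the caller passes the average latency in milliseconds as a `double`, the helper multiplies by 1000 before casting to `collected_number`, and the dimension divisor of 1000 scales it back on the chart, so sub-millisecond averages are not truncated away. A tiny standalone illustration of that arithmetic (not agent code):

```c
#include <stdio.h>

int main(void) {
    // Hypothetical per-interval average of 0.473 ms.
    double read_avg_ms = 0.473;

    // What common_disk_await() stores: milliseconds scaled by 1000 to an integer.
    long long stored = (long long)(read_avg_ms * 1000.0);          // 473

    // What the chart shows after the dimension divisor of 1000 is applied.
    printf("stored=%lld charted=%.3f ms\n", stored, (double)stored / 1000.0);
    return 0;
}
```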
diff --git a/src/collectors/common-contexts/disk-busy.h b/src/collectors/common-contexts/disk-busy.h
new file mode 100644
index 000000000..92679d9ef
--- /dev/null
+++ b/src/collectors/common-contexts/disk-busy.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DISK_BUSY_H
+#define NETDATA_DISK_BUSY_H
+
+#include "common-contexts.h"
+
+typedef struct {
+ RRDSET *st_busy;
+ RRDDIM *rd_busy;
+} ND_DISK_BUSY;
+
+static inline void common_disk_busy(ND_DISK_BUSY *d, const char *id, const char *name, uint64_t busy_ms, int update_every, instance_labels_cb_t cb, void *data) {
+ if(unlikely(!d->st_busy)) {
+ d->st_busy = rrdset_create_localhost(
+ "disk_busy"
+ , id
+ , name
+ , "utilization"
+ , "disk.busy"
+ , "Disk Busy Time"
+ , "milliseconds"
+ , _COMMON_PLUGIN_NAME
+ , _COMMON_PLUGIN_MODULE_NAME
+ , NETDATA_CHART_PRIO_DISK_BUSY
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_busy = rrddim_add(d->st_busy, "busy", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(cb)
+ cb(d->st_busy, data);
+ }
+
+    // this always has to be in base units, so that exporting sends base units to other time-series db
+ rrddim_set_by_pointer(d->st_busy, d->rd_busy, (collected_number)busy_ms);
+ rrdset_done(d->st_busy);
+}
+
+#endif //NETDATA_DISK_BUSY_H
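Unlike `disk-avgsz.h` and `disk-await.h` above, which take precomputed per-interval averages and use `RRD_ALGORITHM_ABSOLUTE`, this helper expects the raw, monotonically increasing busy-time counter and relies on `RRD_ALGORITHM_INCREMENTAL` to turn it into a per-second rate. A hedged sketch of the calling convention (the counter source is an assumption; only the helper call comes from the header above):

```c
// Illustrative only: pass the cumulative busy-time counter, not a delta.
static ND_DISK_BUSY my_busy = { 0 };   // one per disk in a real collector

static void my_disk_report_busy(const char *id, const char *name,
                                uint64_t cumulative_busy_ms, int update_every) {
    // the INCREMENTAL algorithm on the "busy" dimension computes the rate
    // between consecutive samples of this counter
    common_disk_busy(&my_busy, id, name, cumulative_busy_ms, update_every, NULL, NULL);
}
```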
diff --git a/src/collectors/common-contexts/disk.io.h b/src/collectors/common-contexts/disk-io.h
index 26f98b9be..26f98b9be 100644
--- a/src/collectors/common-contexts/disk.io.h
+++ b/src/collectors/common-contexts/disk-io.h
diff --git a/src/collectors/common-contexts/disk-iotime.h b/src/collectors/common-contexts/disk-iotime.h
new file mode 100644
index 000000000..29707287a
--- /dev/null
+++ b/src/collectors/common-contexts/disk-iotime.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DISK_IOTIME_H
+#define NETDATA_DISK_IOTIME_H
+
+#include "common-contexts.h"
+
+typedef struct {
+ RRDSET *st_iotime;
+ RRDDIM *rd_reads_ms;
+ RRDDIM *rd_writes_ms;
+} ND_DISK_IOTIME;
+
+static inline void common_disk_iotime(ND_DISK_IOTIME *d, const char *id, const char *name, uint64_t reads_ms, uint64_t writes_ms, int update_every, instance_labels_cb_t cb, void *data) {
+ if(unlikely(!d->st_iotime)) {
+ d->st_iotime = rrdset_create_localhost(
+ "disk_iotime"
+ , id
+ , name
+ , "utilization"
+ , "disk.iotime"
+ , "Disk Total I/O Time"
+ , "milliseconds/s"
+ , _COMMON_PLUGIN_NAME
+ , _COMMON_PLUGIN_MODULE_NAME
+ , NETDATA_CHART_PRIO_DISK_IOTIME
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_reads_ms = rrddim_add(d->st_iotime, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_writes_ms = rrddim_add(d->st_iotime, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(cb)
+ cb(d->st_iotime, data);
+ }
+
+    // this always has to be in base units, so that exporting sends base units to other time-series db
+ rrddim_set_by_pointer(d->st_iotime, d->rd_reads_ms, (collected_number)reads_ms);
+ rrddim_set_by_pointer(d->st_iotime, d->rd_writes_ms, (collected_number)writes_ms);
+ rrdset_done(d->st_iotime);
+}
+
+#endif //NETDATA_DISK_IOTIME_H
diff --git a/src/collectors/common-contexts/disk-ops.h b/src/collectors/common-contexts/disk-ops.h
new file mode 100644
index 000000000..6e1ac4690
--- /dev/null
+++ b/src/collectors/common-contexts/disk-ops.h
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DISK_OPS_H
+#define NETDATA_DISK_OPS_H
+
+#include "common-contexts.h"
+
+typedef struct {
+ RRDSET *st_ops;
+ RRDDIM *rd_ops_reads;
+ RRDDIM *rd_ops_writes;
+} ND_DISK_OPS;
+
+static inline void common_disk_ops(ND_DISK_OPS *d, const char *id, const char *name, uint64_t ops_read, uint64_t ops_write, int update_every, instance_labels_cb_t cb, void *data) {
+ if(unlikely(!d->st_ops)) {
+ d->st_ops = rrdset_create_localhost(
+ "disk_ops"
+ , id
+ , name
+ , "ops"
+ , "disk.ops"
+ , "Disk Completed I/O Operations"
+ , "operations/s"
+ , _COMMON_PLUGIN_NAME
+ , _COMMON_PLUGIN_MODULE_NAME
+ , NETDATA_CHART_PRIO_DISK_OPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ d->rd_ops_reads = rrddim_add(d->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->rd_ops_writes = rrddim_add(d->st_ops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ if(cb)
+ cb(d->st_ops, data);
+ }
+
+    // this always has to be in base units, so that exporting sends base units to other time-series db
+ rrddim_set_by_pointer(d->st_ops, d->rd_ops_reads, (collected_number)ops_read);
+ rrddim_set_by_pointer(d->st_ops, d->rd_ops_writes, (collected_number)ops_write);
+ rrdset_done(d->st_ops);
+}
+
+#endif //NETDATA_DISK_OPS_H
diff --git a/src/collectors/common-contexts/disk-qops.h b/src/collectors/common-contexts/disk-qops.h
new file mode 100644
index 000000000..89f38cb27
--- /dev/null
+++ b/src/collectors/common-contexts/disk-qops.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DISK_QOPS_H
+#define NETDATA_DISK_QOPS_H
+
+#include "common-contexts.h"
+
+typedef struct {
+ RRDSET *st_qops;
+ RRDDIM *rd_qops;
+} ND_DISK_QOPS;
+
+static inline void common_disk_qops(ND_DISK_QOPS *d, const char *id, const char *name, uint64_t queued_ops, int update_every, instance_labels_cb_t cb, void *data) {
+ if(unlikely(!d->st_qops)) {
+ d->st_qops = rrdset_create_localhost(
+ "disk_qops"
+ , id
+ , name
+ , "ops"
+ , "disk.qops"
+ , "Disk Current I/O Operations"
+ , "operations"
+ , _COMMON_PLUGIN_NAME
+ , _COMMON_PLUGIN_MODULE_NAME
+ , NETDATA_CHART_PRIO_DISK_QOPS
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ d->rd_qops = rrddim_add(d->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ if(cb)
+ cb(d->st_qops, data);
+ }
+
+    // this always has to be in base units, so that exporting sends base units to other time-series db
+ rrddim_set_by_pointer(d->st_qops, d->rd_qops, (collected_number)queued_ops);
+ rrdset_done(d->st_qops);
+}
+
+#endif //NETDATA_DISK_QOPS_H
diff --git a/src/collectors/common-contexts/disk-svctm.h b/src/collectors/common-contexts/disk-svctm.h
new file mode 100644
index 000000000..f1d07c150
--- /dev/null
+++ b/src/collectors/common-contexts/disk-svctm.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DISK_SVCTM_H
+#define NETDATA_DISK_SVCTM_H
+
+#include "common-contexts.h"
+
+typedef struct {
+ RRDSET *st_svctm;
+ RRDDIM *rd_svctm;
+} ND_DISK_SVCTM;
+
+static inline void common_disk_svctm(ND_DISK_SVCTM *d, const char *id, const char *name, double svctm_ms, int update_every, instance_labels_cb_t cb, void *data) {
+ if(unlikely(!d->st_svctm)) {
+ d->st_svctm = rrdset_create_localhost(
+ "disk_svctm"
+ , id
+ , name
+ , "latency"
+ , "disk.svctm"
+ , "Average Service Time"
+ , "milliseconds/operation"
+ , _COMMON_PLUGIN_NAME
+ , _COMMON_PLUGIN_MODULE_NAME
+ , NETDATA_CHART_PRIO_DISK_SVCTM
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ d->rd_svctm = rrddim_add(d->st_svctm, "svctm", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+
+ if(cb)
+ cb(d->st_svctm, data);
+ }
+
+    // this always has to be in base units, so that exporting sends base units to other time-series db
+ rrddim_set_by_pointer(d->st_svctm, d->rd_svctm, (collected_number)(svctm_ms * 1000.0));
+ rrdset_done(d->st_svctm);
+}
+
+#endif //NETDATA_DISK_SVCTM_H
diff --git a/src/collectors/common-contexts/disk-util.h b/src/collectors/common-contexts/disk-util.h
new file mode 100644
index 000000000..8733975f6
--- /dev/null
+++ b/src/collectors/common-contexts/disk-util.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DISK_UTIL_H
+#define NETDATA_DISK_UTIL_H
+
+#include "common-contexts.h"
+
+typedef struct {
+ RRDSET *st_util;
+ RRDDIM *rd_util;
+} ND_DISK_UTIL;
+
+static inline void common_disk_util(ND_DISK_UTIL *d, const char *id, const char *name, uint64_t percent, int update_every, instance_labels_cb_t cb, void *data) {
+ if(unlikely(!d->st_util)) {
+ d->st_util = rrdset_create_localhost(
+ "disk_util"
+ , id
+ , name
+ , "utilization"
+ , "disk.util"
+ , "Disk Utilization Time"
+ , "% of time working"
+ , _COMMON_PLUGIN_NAME
+ , _COMMON_PLUGIN_MODULE_NAME
+ , NETDATA_CHART_PRIO_DISK_UTIL
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ d->rd_util = rrddim_add(d->st_util, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ if(cb)
+ cb(d->st_util, data);
+ }
+
+    // this always has to be in base units, so that exporting sends base units to other time-series db
+ rrddim_set_by_pointer(d->st_util, d->rd_util, (collected_number)percent);
+ rrdset_done(d->st_util);
+}
+
+#endif //NETDATA_DISK_UTIL_H
diff --git a/src/collectors/common-contexts/mem.available.h b/src/collectors/common-contexts/mem-available.h
index 3f763fe18..3f763fe18 100644
--- a/src/collectors/common-contexts/mem.available.h
+++ b/src/collectors/common-contexts/mem-available.h
diff --git a/src/collectors/common-contexts/mem.pgfaults.h b/src/collectors/common-contexts/mem-pgfaults.h
index 503b9f7e8..8a10449e6 100644
--- a/src/collectors/common-contexts/mem.pgfaults.h
+++ b/src/collectors/common-contexts/mem-pgfaults.h
@@ -25,8 +25,6 @@ static inline void common_mem_pgfaults(uint64_t minor, uint64_t major, int updat
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_pgfaults, RRDSET_FLAG_DETAIL);
-
rd_minor = rrddim_add(st_pgfaults, "minor", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_major = rrddim_add(st_pgfaults, "major", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
diff --git a/src/collectors/common-contexts/mem.swap.h b/src/collectors/common-contexts/mem-swap.h
index 1c1b053d7..d4c0cfc89 100644
--- a/src/collectors/common-contexts/mem.swap.h
+++ b/src/collectors/common-contexts/mem-swap.h
@@ -30,8 +30,6 @@ static inline void common_mem_swap(uint64_t free_bytes, uint64_t used_bytes, int
, RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st_system_swap, RRDSET_FLAG_DETAIL);
-
rd_free = rrddim_add(st_system_swap, "free", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
rd_used = rrddim_add(st_system_swap, "used", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
}
diff --git a/src/collectors/common-contexts/system.interrupts.h b/src/collectors/common-contexts/system-interrupts.h
index dffd70572..4b78e9469 100644
--- a/src/collectors/common-contexts/system.interrupts.h
+++ b/src/collectors/common-contexts/system-interrupts.h
@@ -27,8 +27,6 @@ static inline void common_interrupts(uint64_t interrupts, int update_every, char
, update_every
, RRDSET_TYPE_LINE);
- rrdset_flag_set(st_intr, RRDSET_FLAG_DETAIL);
-
rd_interrupts = rrddim_add(st_intr, "interrupts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
diff --git a/src/collectors/common-contexts/system.io.h b/src/collectors/common-contexts/system-io.h
index 84440c9b8..84440c9b8 100644
--- a/src/collectors/common-contexts/system.io.h
+++ b/src/collectors/common-contexts/system-io.h
diff --git a/src/collectors/common-contexts/system.ipc.h b/src/collectors/common-contexts/system-ipc.h
index 129ce6dfa..129ce6dfa 100644
--- a/src/collectors/common-contexts/system.ipc.h
+++ b/src/collectors/common-contexts/system-ipc.h
diff --git a/src/collectors/common-contexts/system.processes.h b/src/collectors/common-contexts/system-processes.h
index 1b886d65f..1b886d65f 100644
--- a/src/collectors/common-contexts/system.processes.h
+++ b/src/collectors/common-contexts/system-processes.h
diff --git a/src/collectors/common-contexts/system.ram.h b/src/collectors/common-contexts/system-ram.h
index 6b108405c..6b108405c 100644
--- a/src/collectors/common-contexts/system.ram.h
+++ b/src/collectors/common-contexts/system-ram.h
diff --git a/src/collectors/cups.plugin/cups_plugin.c b/src/collectors/cups.plugin/cups_plugin.c
index 20b155e14..8d9e46cb1 100644
--- a/src/collectors/cups.plugin/cups_plugin.c
+++ b/src/collectors/cups.plugin/cups_plugin.c
@@ -226,7 +226,6 @@ void reset_metrics() {
}
int main(int argc, char **argv) {
- clocks_init();
nd_log_initialize_for_external_plugins("cups.plugin");
parse_command_line(argc, argv);
@@ -243,12 +242,11 @@ int main(int argc, char **argv) {
time_t started_t = now_monotonic_sec();
size_t iteration = 0;
- usec_t step = netdata_update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, netdata_update_every * USEC_PER_SEC);
for (iteration = 0; 1; iteration++) {
- heartbeat_next(&hb, step);
+ heartbeat_next(&hb);
if (unlikely(netdata_exit))
break;
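This heartbeat change repeats across the external plugins in this version: the tick period is now fixed at `heartbeat_init()` time instead of being passed to every `heartbeat_next()` call. A sketch of the new loop shape, assuming the plugin's usual includes and the variables visible in the hunk above (collection work elided):

```c
// New-style pacing loop, as used by cups.plugin and debugfs.plugin in this diff.
heartbeat_t hb;
heartbeat_init(&hb, netdata_update_every * USEC_PER_SEC);  // period set once

for (size_t iteration = 0; ; iteration++) {
    heartbeat_next(&hb);            // sleeps until the next tick
    if (unlikely(netdata_exit))     // same exit check as in the loop above
        break;
    // ... collect and print metrics for this iteration ...
}
```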
diff --git a/src/collectors/cups.plugin/integrations/cups.md b/src/collectors/cups.plugin/integrations/cups.md
index 828a7717e..1fec7135f 100644
--- a/src/collectors/cups.plugin/integrations/cups.md
+++ b/src/collectors/cups.plugin/integrations/cups.md
@@ -115,8 +115,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/debugfs.plugin/debugfs_extfrag.c b/src/collectors/debugfs.plugin/debugfs_extfrag.c
index 75da4deca..eb0946125 100644
--- a/src/collectors/debugfs.plugin/debugfs_extfrag.c
+++ b/src/collectors/debugfs.plugin/debugfs_extfrag.c
@@ -60,23 +60,20 @@ static void extfrag_send_chart(char *chart_id, collected_number *values)
}
int do_debugfs_extfrag(int update_every, const char *name) {
- static procfile *ff = NULL;
- static int chart_order = NETDATA_CHART_PRIO_MEM_FRAGMENTATION;
+    static procfile *ff = NULL;
if (unlikely(!ff)) {
char filename[FILENAME_MAX + 1];
- snprintfz(filename,
- FILENAME_MAX,
- "%s%s",
- netdata_configured_host_prefix,
- "/sys/kernel/debug/extfrag/extfrag_index");
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/kernel/debug/extfrag/extfrag_index");
ff = procfile_open(filename, " \t,", PROCFILE_FLAG_DEFAULT);
- if (unlikely(!ff)) return 1;
+ if (unlikely(!ff))
+ return 1;
}
ff = procfile_readall(ff);
- if (unlikely(!ff)) return 1;
+ if (unlikely(!ff))
+ return 1;
size_t l, i, j, lines = procfile_lines(ff);
for (l = 0; l < lines; l++) {
@@ -102,19 +99,21 @@ int do_debugfs_extfrag(int update_every, const char *name) {
extrafrag->id = extrafrag->node_zone;
fprintf(
stdout,
- "CHART mem.fragmentation_index_%s '' 'Memory fragmentation index for each order' 'index' 'fragmentation' 'mem.fragmentation_index_%s' 'line' %d %d '' 'debugfs.plugin' '%s'\n",
+ "CHART mem.fragmentation_index_%s '' 'Memory fragmentation index for each order' 'index' 'fragmentation' 'mem.numa_node_zone_fragmentation_index' 'line' %d %d '' 'debugfs.plugin' '%s'\n",
extrafrag->node_zone,
- zone_lowercase,
- chart_order++, // FIXME: the same zones must have the same order
+ NETDATA_CHART_PRIO_MEM_FRAGMENTATION,
update_every,
name);
for (i = 0; i < NETDATA_ORDER_FRAGMENTATION; i++) {
fprintf(stdout, "DIMENSION '%s' '%s' absolute 1 1000 ''\n", orders[i], orders[i]);
}
- fprintf(stdout,
- "CLABEL 'numa_node' 'node%s' 1\n"
- "CLABEL_COMMIT\n",
- id);
+ fprintf(
+ stdout,
+ "CLABEL 'numa_node' 'node%s' 1\n"
+ "CLABEL 'zone' '%s' 1\n"
+ "CLABEL_COMMIT\n",
+ id,
+ zone);
}
extfrag_send_chart(chart_id, line_orders);
}
diff --git a/src/collectors/debugfs.plugin/debugfs_plugin.c b/src/collectors/debugfs.plugin/debugfs_plugin.c
index 94e3db631..37b4c83d8 100644
--- a/src/collectors/debugfs.plugin/debugfs_plugin.c
+++ b/src/collectors/debugfs.plugin/debugfs_plugin.c
@@ -159,7 +159,6 @@ static void debugfs_parse_args(int argc, char **argv)
int main(int argc, char **argv)
{
- clocks_init();
nd_log_initialize_for_external_plugins("debugfs.plugin");
netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
@@ -214,12 +213,11 @@ int main(int argc, char **argv)
debugfs_parse_args(argc, argv);
size_t iteration;
- usec_t step = update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
for (iteration = 0; iteration < 86400; iteration++) {
- heartbeat_next(&hb, step);
+ heartbeat_next(&hb);
int enabled = 0;
for (int i = 0; debugfs_modules[i].name; i++) {
diff --git a/src/collectors/debugfs.plugin/integrations/linux_zswap.md b/src/collectors/debugfs.plugin/integrations/linux_zswap.md
index b41a480f9..7c15dd50e 100644
--- a/src/collectors/debugfs.plugin/integrations/linux_zswap.md
+++ b/src/collectors/debugfs.plugin/integrations/linux_zswap.md
@@ -112,8 +112,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/debugfs.plugin/integrations/power_capping.md b/src/collectors/debugfs.plugin/integrations/power_capping.md
index 5acb6bed6..5e9775fd7 100644
--- a/src/collectors/debugfs.plugin/integrations/power_capping.md
+++ b/src/collectors/debugfs.plugin/integrations/power_capping.md
@@ -106,8 +106,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md b/src/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md
index 3c43a592a..7ff57309f 100644
--- a/src/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md
+++ b/src/collectors/debugfs.plugin/integrations/system_memory_fragmentation.md
@@ -110,8 +110,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/diskspace.plugin/integrations/disk_space.md b/src/collectors/diskspace.plugin/integrations/disk_space.md
index 61015120d..d27e7da25 100644
--- a/src/collectors/diskspace.plugin/integrations/disk_space.md
+++ b/src/collectors/diskspace.plugin/integrations/disk_space.md
@@ -108,8 +108,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -125,7 +125,7 @@ You can also specify per mount point `[plugin:proc:diskspace:mountpoint]`
|:----|:-----------|:-------|:--------:|
| update every | Data collection frequency. | 1 | no |
| remove charts of unmounted disks | Remove chart when a device is unmounted on host. | yes | no |
-| check for new mount points every | Parse proc files frequency. | 15 | no |
+| check for new mount points every | Parse proc files frequency. | 15s | no |
| exclude space metrics on paths | Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern. | /proc/* /sys/* /var/run/user/* /run/user/* /snap/* /var/lib/docker/* | no |
| exclude space metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | *gvfs *gluster* *s3fs *ipfs *davfs2 *httpfs *sshfs *gdfs *moosefs fusectl autofs | no |
| exclude inode metrics on filesystems | Do not show metrics (charts) for listed filesystems. This option accepts netdata simple pattern. | msdosfs msdos vfat overlayfs aufs* *unionfs | no |
diff --git a/src/collectors/diskspace.plugin/metadata.yaml b/src/collectors/diskspace.plugin/metadata.yaml
index 578f56bd0..a00a9e91d 100644
--- a/src/collectors/diskspace.plugin/metadata.yaml
+++ b/src/collectors/diskspace.plugin/metadata.yaml
@@ -63,7 +63,7 @@ modules:
required: false
- name: check for new mount points every
description: Parse proc files frequency.
- default_value: 15
+ default_value: 15s
required: false
- name: exclude space metrics on paths
description: Do not show metrics (charts) for listed paths. This option accepts netdata simple pattern.
diff --git a/src/collectors/diskspace.plugin/plugin_diskspace.c b/src/collectors/diskspace.plugin/plugin_diskspace.c
index f1d8909b2..c9f6fe599 100644
--- a/src/collectors/diskspace.plugin/plugin_diskspace.c
+++ b/src/collectors/diskspace.plugin/plugin_diskspace.c
@@ -544,11 +544,11 @@ void *diskspace_slow_worker(void *ptr)
usec_t step = slow_update_every * USEC_PER_SEC;
usec_t real_step = USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
while(service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (real_step < step) {
real_step += USEC_PER_SEC;
@@ -629,7 +629,7 @@ static void diskspace_main_cleanup(void *pptr) {
#error WORKER_UTILIZATION_MAX_JOB_TYPES has to be at least 3
#endif
-int diskspace_function_mount_points(BUFFER *wb, const char *function __maybe_unused) {
+static int diskspace_function_mount_points(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) {
netdata_mutex_lock(&slow_mountinfo_mutex);
buffer_flush(wb);
@@ -849,17 +849,20 @@ void *diskspace_main(void *ptr) {
worker_register_job_name(WORKER_JOB_CLEANUP, "cleanup");
rrd_function_add_inline(localhost, NULL, "mount-points", 10,
- RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_DISKSPACE_HELP,
+ RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_VERSION_DEFAULT,
+ RRDFUNCTIONS_DISKSPACE_HELP,
"top", HTTP_ACCESS_ANONYMOUS_DATA,
diskspace_function_mount_points);
cleanup_mount_points = config_get_boolean(CONFIG_SECTION_DISKSPACE, "remove charts of unmounted disks" , cleanup_mount_points);
- int update_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every);
- if(update_every < localhost->rrd_update_every)
+ int update_every = (int)config_get_duration_seconds(CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every);
+ if(update_every < localhost->rrd_update_every) {
update_every = localhost->rrd_update_every;
+ config_set_duration_seconds(CONFIG_SECTION_DISKSPACE, "update every", update_every);
+ }
- check_for_new_mountpoints_every = (int)config_get_number(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every);
+ check_for_new_mountpoints_every = (int)config_get_duration_seconds(CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every);
if(check_for_new_mountpoints_every < update_every)
check_for_new_mountpoints_every = update_every;
@@ -873,12 +876,11 @@ void *diskspace_main(void *ptr) {
diskspace_slow_worker,
&slow_worker_data);
- usec_t step = update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
while(service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- /* usec_t hb_dt = */ heartbeat_next(&hb, step);
+ /* usec_t hb_dt = */ heartbeat_next(&hb);
if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
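The move from `config_get_number()` to `config_get_duration_seconds()` is what makes duration-style values such as `15s` valid in `netdata.conf`, matching the `15s` default now documented in the metadata and integration page above. A condensed sketch of the clamping pattern used here, assuming the agent's config API and `localhost` as in the surrounding code:

```c
// Sketch of the duration-based configuration reads in diskspace_main() above.
int update_every = (int)config_get_duration_seconds(
    CONFIG_SECTION_DISKSPACE, "update every", localhost->rrd_update_every);

if (update_every < localhost->rrd_update_every) {
    // never collect faster than the global tick, and persist the clamped value
    update_every = localhost->rrd_update_every;
    config_set_duration_seconds(CONFIG_SECTION_DISKSPACE, "update every", update_every);
}

check_for_new_mountpoints_every = (int)config_get_duration_seconds(
    CONFIG_SECTION_DISKSPACE, "check for new mount points every", check_for_new_mountpoints_every);

if (check_for_new_mountpoints_every < update_every)
    check_for_new_mountpoints_every = update_every;
```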
diff --git a/src/collectors/ebpf.plugin/README.md b/src/collectors/ebpf.plugin/README.md
index e9243966b..1246fec04 100644
--- a/src/collectors/ebpf.plugin/README.md
+++ b/src/collectors/ebpf.plugin/README.md
@@ -1,16 +1,6 @@
-<!--
-title: "Kernel traces/metrics (eBPF) monitoring with Netdata"
-description: "Use Netdata's extended Berkeley Packet Filter (eBPF) collector to monitor kernel-level metrics about yourcomplex applications with per-second granularity."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/README.md"
-sidebar_label: "Kernel traces/metrics (eBPF)"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/System metrics"
--->
-
# Kernel traces/metrics (eBPF) collector
-The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs to help you troubleshoot and debug how applications interact with the Linux kernel. The `ebpf.plugin` uses [tracepoints, trampoline, and2 kprobes](#how-netdata-collects-data-using-probes-and-tracepoints) to collect a wide array of high value data about the host that would otherwise be impossible to capture.
+The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs to help you troubleshoot and debug how applications interact with the Linux kernel. The `ebpf.plugin` uses [tracepoints, trampolines, and kprobes](#how-netdata-collects-data-using-probes-and-tracepoints) to collect a wide array of high-value data about the host that would otherwise be impossible to capture.
> ❗ eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`, and all kernels on CentOS 7.6 or later. For kernels older than `4.11.0`, improved support is in active development.
@@ -26,10 +16,10 @@ For hands-on configuration and troubleshooting tips see our [tutorial on trouble
Netdata uses the following features from the Linux kernel to run eBPF programs:
-- Tracepoints are hooks to call specific functions. Tracepoints are more stable than `kprobes` and are preferred when
+- Tracepoints are hooks to call specific functions. Tracepoints are more stable than `kprobes` and are preferred when
both options are available.
-- Trampolines are bridges between kernel functions, and BPF programs. Netdata uses them by default whenever available.
-- Kprobes and return probes (`kretprobe`): Probes can insert virtually into any kernel instruction. When eBPF runs in `entry` mode, it attaches only `kprobes` for internal functions monitoring calls and some arguments every time a function is called. The user can also change configuration to use [`return`](#global-configuration-options) mode, and this will allow users to monitor return from these functions and detect possible failures.
+- Trampolines are bridges between kernel functions, and BPF programs. Netdata uses them by default whenever available.
+- Kprobes and return probes (`kretprobe`): Probes can insert virtually into any kernel instruction. When eBPF runs in `entry` mode, it attaches only `kprobes` for internal functions monitoring calls and some arguments every time a function is called. The user can also change configuration to use [`return`](#global-configuration-options) mode, and this will allow users to monitor return from these functions and detect possible failures.
In each case, wherever a normal kprobe, kretprobe, or tracepoint would have run its hook function, an eBPF program is run instead, performing various collection logic before letting the kernel continue its normal control flow.
@@ -38,42 +28,45 @@ There are more methods to trigger eBPF programs, such as uprobes, but currently
## Configuring ebpf.plugin
The eBPF collector is installed and enabled by default on most new installations of the Agent.
-If your Agent is v1.22 or older, you may to enable the collector yourself.
+If your Agent is v1.22 or older, you may need to enable the collector yourself.
### Enable the eBPF collector
-To enable or disable the entire eBPF collector:
+To enable or disable the entire eBPF collector:
+
+1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata
```
-2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script to edit `netdata.conf`.
+2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script to edit `netdata.conf`.
```bash
./edit-config netdata.conf
```
-3. Enable the collector by scrolling down to the `[plugins]` section. Uncomment the line `ebpf` (not
+3. Enable the collector by scrolling down to the `[plugins]` section. Uncomment the line `ebpf` (not
`ebpf_process`) and set it to `yes`.
- ```conf
+ ```text
[plugins]
ebpf = yes
```
### Configure the eBPF collector
-You can configure the eBPF collector's behavior to fine-tune which metrics you receive and [optimize performance]\(#performance opimization).
+You can configure the eBPF collector's behavior to fine-tune which metrics you receive and [optimize performance](#performance-opimization).
To edit the `ebpf.d.conf`:
-1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
```bash
cd /etc/netdata
```
-2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/ebpf.d.conf).
+
+2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/src/collectors/ebpf.plugin/ebpf.d.conf).
```bash
./edit-config ebpf.d.conf
@@ -94,9 +87,9 @@ By default, this plugin uses the `entry` mode. Changing this mode can create sig
system, but also offer valuable information if you are developing or debugging software. The `ebpf load mode` option
accepts the following values:
-- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described in
+- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described in
the sections above, and does not show charts related to errors.
-- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates new
+- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates new
charts for the return of these functions, such as errors. Monitoring function returns can help in debugging software,
such as failing to close file descriptors or creating zombie processes.
@@ -108,7 +101,7 @@ interact with the Linux kernel.
If you want to enable `apps.plugin` integration, change the "apps" setting to "yes".
-```conf
+```text
[global]
apps = yes
```
@@ -122,7 +115,7 @@ interacts with the Linux kernel.
The integration with `cgroups.plugin` is disabled by default to avoid creating overhead on your system. If you want to
_enable_ the integration with `cgroups.plugin`, change the `cgroups` setting to `yes`.
-```conf
+```text
[global]
cgroups = yes
```
@@ -133,10 +126,7 @@ If you do not need to monitor specific metrics for your `cgroups`, you can enabl
#### Maps per Core
-When netdata is running on kernels newer than `4.6` users are allowed to modify how the `ebpf.plugin` creates maps (hash or
-array). When `maps per core` is defined as `yes`, plugin will create a map per core on host, on the other hand,
-when the value is set as `no` only one hash table will be created, this option will use less memory, but it also can
-increase overhead for processes.
+When netdata is running on kernels newer than `4.6`, users can modify how `ebpf.plugin` creates maps (hash or array). When `maps per core` is set to `yes`, the plugin creates one map per core on the host; when it is set to `no`, only one hash table is created. The latter uses less memory, but it can also increase overhead for processes.
#### Collect PID
@@ -146,10 +136,10 @@ process group for which it needs to plot data.
There are different ways to collect PID, and you can select the way `ebpf.plugin` collects data with the following
values:
-- `real parent`: This is the default mode. Collection will aggregate data for the real parent, the thread that creates
+- `real parent`: This is the default mode. Collection will aggregate data for the real parent, the thread that creates
child threads.
-- `parent`: Parent and real parent are the same when a process starts, but this value can be changed during run time.
-- `all`: This option will store all PIDs that run on the host. Note, this method can be expensive for the host,
+- `parent`: Parent and real parent are the same when a process starts, but this value can be changed during run time.
+- `all`: This option will store all PIDs that run on the host. Note, this method can be expensive for the host,
because more memory needs to be allocated and parsed.
The threads that have integration with other collectors have an internal clean up wherein they attach either a
@@ -174,97 +164,97 @@ Linux metrics:
> Note: The parenthetical accompanying each bulleted item provides the chart name.
-- mem
- - Number of processes killed due out of memory. (`oomkills`)
-- process
- - Number of processes created with `do_fork`. (`process_create`)
- - Number of threads created with `do_fork` or `clone (2)`, depending on your system's kernel
+- mem
+  - Number of processes killed due to running out of memory. (`oomkills`)
+- process
+ - Number of processes created with `do_fork`. (`process_create`)
+ - Number of threads created with `do_fork` or `clone (2)`, depending on your system's kernel
version. (`thread_create`)
- - Number of times that a process called `do_exit`. (`task_exit`)
- - Number of times that a process called `release_task`. (`task_close`)
- - Number of times that an error happened to create thread or process. (`task_error`)
-- swap
- - Number of calls to `swap_readpage`. (`swap_read_call`)
- - Number of calls to `swap_writepage`. (`swap_write_call`)
-- network
- - Number of outbound connections using TCP/IPv4. (`outbound_conn_ipv4`)
- - Number of outbound connections using TCP/IPv6. (`outbound_conn_ipv6`)
- - Number of bytes sent. (`total_bandwidth_sent`)
- - Number of bytes received. (`total_bandwidth_recv`)
- - Number of calls to `tcp_sendmsg`. (`bandwidth_tcp_send`)
- - Number of calls to `tcp_cleanup_rbuf`. (`bandwidth_tcp_recv`)
- - Number of calls to `tcp_retransmit_skb`. (`bandwidth_tcp_retransmit`)
- - Number of calls to `udp_sendmsg`. (`bandwidth_udp_send`)
- - Number of calls to `udp_recvmsg`. (`bandwidth_udp_recv`)
-- file access
- - Number of calls to open files. (`file_open`)
- - Number of calls to open files that returned errors. (`open_error`)
- - Number of files closed. (`file_closed`)
- - Number of calls to close files that returned errors. (`file_error_closed`)
-- vfs
- - Number of calls to `vfs_unlink`. (`file_deleted`)
- - Number of calls to `vfs_write`. (`vfs_write_call`)
- - Number of calls to write a file that returned errors. (`vfs_write_error`)
- - Number of calls to `vfs_read`. (`vfs_read_call`)
- - - Number of calls to read a file that returned errors. (`vfs_read_error`)
- - Number of bytes written with `vfs_write`. (`vfs_write_bytes`)
- - Number of bytes read with `vfs_read`. (`vfs_read_bytes`)
- - Number of calls to `vfs_fsync`. (`vfs_fsync`)
- - Number of calls to sync file that returned errors. (`vfs_fsync_error`)
- - Number of calls to `vfs_open`. (`vfs_open`)
- - Number of calls to open file that returned errors. (`vfs_open_error`)
- - Number of calls to `vfs_create`. (`vfs_create`)
- - Number of calls to open file that returned errors. (`vfs_create_error`)
-- page cache
- - Ratio of pages accessed. (`cachestat_ratio`)
- - Number of modified pages ("dirty"). (`cachestat_dirties`)
- - Number of accessed pages. (`cachestat_hits`)
- - Number of pages brought from disk. (`cachestat_misses`)
-- directory cache
- - Ratio of files available in directory cache. (`dc_hit_ratio`)
- - Number of files accessed. (`dc_reference`)
- - Number of files accessed that were not in cache. (`dc_not_cache`)
- - Number of files not found. (`dc_not_found`)
-- ipc shm
- - Number of calls to `shm_get`. (`shmget_call`)
- - Number of calls to `shm_at`. (`shmat_call`)
- - Number of calls to `shm_dt`. (`shmdt_call`)
- - Number of calls to `shm_ctl`. (`shmctl_call`)
+ - Number of times that a process called `do_exit`. (`task_exit`)
+ - Number of times that a process called `release_task`. (`task_close`)
+ - Number of times that an error happened to create thread or process. (`task_error`)
+- swap
+ - Number of calls to `swap_readpage`. (`swap_read_call`)
+ - Number of calls to `swap_writepage`. (`swap_write_call`)
+- network
+ - Number of outbound connections using TCP/IPv4. (`outbound_conn_ipv4`)
+ - Number of outbound connections using TCP/IPv6. (`outbound_conn_ipv6`)
+ - Number of bytes sent. (`total_bandwidth_sent`)
+ - Number of bytes received. (`total_bandwidth_recv`)
+ - Number of calls to `tcp_sendmsg`. (`bandwidth_tcp_send`)
+ - Number of calls to `tcp_cleanup_rbuf`. (`bandwidth_tcp_recv`)
+ - Number of calls to `tcp_retransmit_skb`. (`bandwidth_tcp_retransmit`)
+ - Number of calls to `udp_sendmsg`. (`bandwidth_udp_send`)
+ - Number of calls to `udp_recvmsg`. (`bandwidth_udp_recv`)
+- file access
+ - Number of calls to open files. (`file_open`)
+ - Number of calls to open files that returned errors. (`open_error`)
+ - Number of files closed. (`file_closed`)
+ - Number of calls to close files that returned errors. (`file_error_closed`)
+- vfs
+ - Number of calls to `vfs_unlink`. (`file_deleted`)
+ - Number of calls to `vfs_write`. (`vfs_write_call`)
+ - Number of calls to write a file that returned errors. (`vfs_write_error`)
+ - Number of calls to `vfs_read`. (`vfs_read_call`)
+  - Number of calls to read a file that returned errors. (`vfs_read_error`)
+ - Number of bytes written with `vfs_write`. (`vfs_write_bytes`)
+ - Number of bytes read with `vfs_read`. (`vfs_read_bytes`)
+ - Number of calls to `vfs_fsync`. (`vfs_fsync`)
+ - Number of calls to sync file that returned errors. (`vfs_fsync_error`)
+ - Number of calls to `vfs_open`. (`vfs_open`)
+ - Number of calls to open file that returned errors. (`vfs_open_error`)
+ - Number of calls to `vfs_create`. (`vfs_create`)
+ - Number of calls to open file that returned errors. (`vfs_create_error`)
+- page cache
+ - Ratio of pages accessed. (`cachestat_ratio`)
+ - Number of modified pages ("dirty"). (`cachestat_dirties`)
+ - Number of accessed pages. (`cachestat_hits`)
+ - Number of pages brought from disk. (`cachestat_misses`)
+- directory cache
+ - Ratio of files available in directory cache. (`dc_hit_ratio`)
+ - Number of files accessed. (`dc_reference`)
+ - Number of files accessed that were not in cache. (`dc_not_cache`)
+ - Number of files not found. (`dc_not_found`)
+- ipc shm
+ - Number of calls to `shm_get`. (`shmget_call`)
+ - Number of calls to `shm_at`. (`shmat_call`)
+ - Number of calls to `shm_dt`. (`shmdt_call`)
+ - Number of calls to `shm_ctl`. (`shmctl_call`)
### `[ebpf programs]` configuration options
The eBPF collector enables and runs the following eBPF programs by default:
-- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
+- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
[`apps.plugin`](/src/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
for each application.
-- `fd` : This eBPF program creates charts that show information about calls to open files.
-- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2).
-- `shm`: This eBPF program creates charts that show calls to syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2).
-- `process`: This eBPF program creates charts that show information about process life. When in `return` mode, it also
+- `fd` : This eBPF program creates charts that show information about calls to open files.
+- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2).
+- `shm`: This eBPF program creates charts that show calls to syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2).
+- `process`: This eBPF program creates charts that show information about process life. When in `return` mode, it also
creates charts showing errors when these operations are executed.
-- `hardirq`: This eBPF program creates charts that show information about time spent servicing individual hardware
+- `hardirq`: This eBPF program creates charts that show information about time spent servicing individual hardware
interrupt requests (hard IRQs).
-- `softirq`: This eBPF program creates charts that show information about time spent servicing individual software
+- `softirq`: This eBPF program creates charts that show information about time spent servicing individual software
interrupt requests (soft IRQs).
-- `oomkill`: This eBPF program creates a chart that shows OOM kills for all applications recognized via
+- `oomkill`: This eBPF program creates a chart that shows OOM kills for all applications recognized via
the `apps.plugin` integration. Note that this program will show application charts regardless of whether apps
integration is turned on or off.
You can also enable the following eBPF programs:
-- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends
+- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends
`kprobes` for `lookup_fast()` and `d_lookup()` to identify if files are inside directory cache, outside and files are
not found.
-- `disk` : This eBPF program creates charts that show information about disk latency independent of filesystem.
-- `filesystem` : This eBPF program creates charts that show information about some filesystem latency.
-- `swap` : This eBPF program creates charts that show information about swap access.
-- `mdflush`: This eBPF program creates charts that show information about
-- `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
-- `socket`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+- `disk` : This eBPF program creates charts that show information about disk latency independent of filesystem.
+- `filesystem` : This eBPF program creates charts that show information about some filesystem latency.
+- `swap` : This eBPF program creates charts that show information about swap access.
+- `mdflush`: This eBPF program creates charts that show information about multi-device software flushes.
+- `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
+- `socket`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
   bandwidth consumed by each.
-- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
+- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
### Configuring eBPF threads
@@ -272,24 +262,26 @@ You can configure each thread of the eBPF data collector. This allows you to ove
To configure an eBPF thread:
-1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+1. Navigate to the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
```bash
cd /etc/netdata
```
-2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) script to edit a thread configuration file. The following configuration files are available:
- - `network.conf`: Configuration for the [`network` thread](#network-configuration). This config file overwrites the global options and also
+2. Use the [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script to edit a thread configuration file. The following configuration files are available:
+
+ - `network.conf`: Configuration for the [`network` thread](#network-configuration). This config file overrides the global options and also
lets you specify which network the eBPF collector monitors.
- - `process.conf`: Configuration for the [`process` thread](#sync-configuration).
- - `cachestat.conf`: Configuration for the `cachestat` thread(#filesystem-configuration).
- - `dcstat.conf`: Configuration for the `dcstat` thread.
- - `disk.conf`: Configuration for the `disk` thread.
- - `fd.conf`: Configuration for the `file descriptor` thread.
- - `filesystem.conf`: Configuration for the `filesystem` thread.
- - `hardirq.conf`: Configuration for the `hardirq` thread.
- - `softirq.conf`: Configuration for the `softirq` thread.
- - `sync.conf`: Configuration for the `sync` thread.
- - `vfs.conf`: Configuration for the `vfs` thread.
+ - `process.conf`: Configuration for the `process` thread.
+ - `cachestat.conf`: Configuration for the `cachestat` thread.
+ - `dcstat.conf`: Configuration for the `dcstat` thread.
+ - `disk.conf`: Configuration for the `disk` thread.
+ - `fd.conf`: Configuration for the `file descriptor` thread.
+ - `filesystem.conf`: Configuration for the `filesystem` thread.
+ - `hardirq.conf`: Configuration for the `hardirq` thread.
+ - `softirq.conf`: Configuration for the `softirq` thread.
+ - `sync.conf`: Configuration for the `sync` thread.
+ - `vfs.conf`: Configuration for the `vfs` thread.
```bash
./edit-config FILE.conf
@@ -304,7 +296,7 @@ are divided in the following sections:
You can configure the information shown by the `ebpf_socket` function using the settings in this section.
-```conf
+```text
[network connections]
enabled = yes
resolve hostname ips = no
@@ -324,13 +316,13 @@ and `145`.
The following options are available:
-- `enabled`: Disable network connections monitoring. This can affect directly some funcion output.
-- `resolve hostname ips`: Enable resolving IPs to hostnames. It is disabled by default because it can be too slow.
-- `resolve service names`: Convert destination ports into service names, for example, port `53` protocol `UDP` becomes `domain`.
+- `enabled`: Enable or disable network connection monitoring. This can directly affect the output of some functions.
+- `resolve hostname ips`: Enable resolving IPs to hostnames. It is disabled by default because it can be too slow.
+- `resolve service names`: Convert destination ports into service names; for example, port `53` with protocol `UDP` becomes `domain`.
All names are read from `/etc/services`.
-- `ports`: Define the destination ports for Netdata to monitor.
-- `hostnames`: The list of hostnames that can be resolved to an IP address.
-- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a
+- `ports`: Define the destination ports for Netdata to monitor.
+- `hostnames`: The list of hostnames that can be resolved to an IP address.
+- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a
range of IPs, or use CIDR values.
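
For illustration only, a filter that restricts monitoring to DNS and the Netdata port on a single IPv4 subnet could look like the sketch below; the values are placeholders, not recommendations.

```text
[network connections]
    enabled = yes
    resolve hostname ips = no
    resolve service names = yes
    ports = 53 19999
    ips = 192.168.1.0/24
```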
By default the traffic table is created using the destination IPs and ports of the sockets. This can be
@@ -346,7 +338,7 @@ section.
For example, Netdata's default port (`19999`) is not listed in `/etc/services`. To associate that port with the Netdata
service in network connection charts, and thus see the name of the service instead of its port, define it:
-```conf
+```text
[service name]
19999 = Netdata
```
@@ -355,7 +347,7 @@ service in network connection charts, and thus see the name of the service inste
The sync configuration has specific options to disable monitoring for syscalls. All syscalls are monitored by default.
-```conf
+```text
[syscalls]
sync = yes
msync = yes
@@ -370,7 +362,7 @@ The sync configuration has specific options to disable monitoring for syscalls.
The filesystem configuration has specific options to disable monitoring for filesystems; by default, all filesystems are
monitored.
-```conf
+```text
[filesystem]
btrfsdist = yes
ext4dist = yes
@@ -408,19 +400,18 @@ You can run our helper script to determine whether your system can support eBPF
curl -sSL https://raw.githubusercontent.com/netdata/kernel-collector/master/tools/check-kernel-config.sh | sudo bash
```
-
If you see a warning about a missing kernel
configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL BPF_JIT`), you will need to recompile your kernel
to support this configuration. The process of recompiling Linux kernels varies based on your distribution and version.
Read the documentation for your system's distribution to learn more about the specific workflow for recompiling the
kernel, ensuring that you set all the necessary configuration options:
-- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
-- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
-- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
-- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
-- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
-- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
+- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
+- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
+- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
+- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
+- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
+- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
### Mount `debugfs` and `tracefs`
@@ -455,12 +446,12 @@ Internally, the Linux kernel treats both processes and threads as `tasks`. To cr
system calls: `fork(2)`, `vfork(2)`, and `clone(2)`. To generate this chart, the eBPF
collector uses the following `tracepoints` and `kprobe`:
-- `sched/sched_process_fork`: Tracepoint called after a call for `fork (2)`, `vfork (2)` and `clone (2)`.
-- `sched/sched_process_exec`: Tracepoint called after a exec-family syscall.
-- `kprobe/kernel_clone`: This is the main [`fork()`](https://elixir.bootlin.com/linux/v5.10/source/kernel/fork.c#L2415)
+- `sched/sched_process_fork`: Tracepoint called after a call to `fork(2)`, `vfork(2)`, or `clone(2)`.
+- `sched/sched_process_exec`: Tracepoint called after an exec-family syscall.
+- `kprobe/kernel_clone`: This is the main [`fork()`](https://elixir.bootlin.com/linux/v5.10/source/kernel/fork.c#L2415)
routine since kernel `5.10.0` was released.
-- `kprobe/_do_fork`: Like `kernel_clone`, but this was the main function between kernels `4.2.0` and `5.9.16`
-- `kprobe/do_fork`: This was the main function before kernel `4.2.0`.
+- `kprobe/_do_fork`: Like `kernel_clone`, but this was the main function between kernels `4.2.0` and `5.9.16`.
+- `kprobe/do_fork`: This was the main function before kernel `4.2.0`.
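
As a quick illustration (not part of Netdata), the tiny program below exercises exactly these paths: the `fork(2)` ends up in `kernel_clone` (or its older equivalents) and fires `sched/sched_process_fork`, while the `execl(3)` call fires `sched/sched_process_exec`, so running it repeatedly should be visible on the process charts.

```c
// Illustration only: one fork event and one exec event per run.
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {
    pid_t child = fork();                              // sched_process_fork / kernel_clone
    if (child < 0) {
        perror("fork");
        return EXIT_FAILURE;
    }
    if (child == 0) {
        execl("/bin/true", "true", (char *)NULL);      // sched_process_exec
        _exit(127);                                    // reached only if exec fails
    }
    waitpid(child, NULL, 0);                           // child exit triggers sched_process_exit
    return EXIT_SUCCESS;
}
```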
#### Process Exit
@@ -469,8 +460,8 @@ system that the task is finishing its work. The second step is to release the ke
function `release_task`. The difference between the two dimensions can help you discover
[zombie processes](https://en.wikipedia.org/wiki/Zombie_process). To get the metrics, the collector uses:
-- `sched/sched_process_exit`: Tracepoint called after a task exits.
-- `kprobe/release_task`: This function is called when a process exits, as the kernel still needs to remove the process
+- `sched/sched_process_exit`: Tracepoint called after a task exits.
+- `kprobe/release_task`: This function is called when a process exits, as the kernel still needs to remove the process
descriptor.
#### Task error
@@ -489,9 +480,9 @@ the collector attaches `kprobes` for cited functions.
The following `tracepoints` are used to measure time usage for soft IRQs:
-- [`irq/softirq_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_entry): Called
+- [`irq/softirq_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_entry): Called
before the softirq handler runs.
-- [`irq/softirq_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_exit): Called when
+- [`irq/softirq_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_exit): Called when
the softirq handler returns.
#### Hard IRQ
@@ -499,60 +490,60 @@ The following `tracepoints` are used to measure time usage for soft IRQs:
The following tracepoints are used to measure the latency of servicing a
hardware interrupt request (hard IRQ).
-- [`irq/irq_handler_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_entry):
+- [`irq/irq_handler_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_entry):
Called immediately before the IRQ action handler.
-- [`irq/irq_handler_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_exit):
+- [`irq/irq_handler_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_exit):
Called immediately after the IRQ action handler returns.
-- `irq_vectors`: These are traces from `irq_handler_entry` and
+- `irq_vectors`: These are traces from `irq_handler_entry` and
`irq_handler_exit` when an IRQ is handled. The following vector tracepoints
are triggered:
- - `irq_vectors/local_timer_entry`
- - `irq_vectors/local_timer_exit`
- - `irq_vectors/reschedule_entry`
- - `irq_vectors/reschedule_exit`
- - `irq_vectors/call_function_entry`
- - `irq_vectors/call_function_exit`
- - `irq_vectors/call_function_single_entry`
- - `irq_vectors/call_function_single_xit`
- - `irq_vectors/irq_work_entry`
- - `irq_vectors/irq_work_exit`
- - `irq_vectors/error_apic_entry`
- - `irq_vectors/error_apic_exit`
- - `irq_vectors/thermal_apic_entry`
- - `irq_vectors/thermal_apic_exit`
- - `irq_vectors/threshold_apic_entry`
- - `irq_vectors/threshold_apic_exit`
- - `irq_vectors/deferred_error_entry`
- - `irq_vectors/deferred_error_exit`
- - `irq_vectors/spurious_apic_entry`
- - `irq_vectors/spurious_apic_exit`
- - `irq_vectors/x86_platform_ipi_entry`
- - `irq_vectors/x86_platform_ipi_exit`
+ - `irq_vectors/local_timer_entry`
+ - `irq_vectors/local_timer_exit`
+ - `irq_vectors/reschedule_entry`
+ - `irq_vectors/reschedule_exit`
+ - `irq_vectors/call_function_entry`
+ - `irq_vectors/call_function_exit`
+ - `irq_vectors/call_function_single_entry`
+ - `irq_vectors/call_function_single_exit`
+ - `irq_vectors/irq_work_entry`
+ - `irq_vectors/irq_work_exit`
+ - `irq_vectors/error_apic_entry`
+ - `irq_vectors/error_apic_exit`
+ - `irq_vectors/thermal_apic_entry`
+ - `irq_vectors/thermal_apic_exit`
+ - `irq_vectors/threshold_apic_entry`
+ - `irq_vectors/threshold_apic_exit`
+ - `irq_vectors/deferred_error_entry`
+ - `irq_vectors/deferred_error_exit`
+ - `irq_vectors/spurious_apic_entry`
+ - `irq_vectors/spurious_apic_exit`
+ - `irq_vectors/x86_platform_ipi_entry`
+ - `irq_vectors/x86_platform_ipi_exit`
#### IPC shared memory
To monitor shared memory system call counts, Netdata attaches tracing to the following functions:
-- `shmget`: Runs when [`shmget`](https://man7.org/linux/man-pages/man2/shmget.2.html) is called.
-- `shmat`: Runs when [`shmat`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
-- `shmdt`: Runs when [`shmdt`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
-- `shmctl`: Runs when [`shmctl`](https://man7.org/linux/man-pages/man2/shmctl.2.html) is called.
+- `shmget`: Runs when [`shmget`](https://man7.org/linux/man-pages/man2/shmget.2.html) is called.
+- `shmat`: Runs when [`shmat`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
+- `shmdt`: Runs when [`shmdt`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
+- `shmctl`: Runs when [`shmctl`](https://man7.org/linux/man-pages/man2/shmctl.2.html) is called.
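
As a concrete illustration (again, not part of Netdata), the short program below touches each of the four traced entry points exactly once, so it produces countable events on the shared-memory charts.

```c
// Illustration only: one call to each traced shared-memory function.
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void) {
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);    // shmget
    if (id < 0) {
        perror("shmget");
        return 1;
    }

    void *addr = shmat(id, NULL, 0);                          // shmat
    if (addr != (void *) -1)
        shmdt(addr);                                          // shmdt

    shmctl(id, IPC_RMID, NULL);                               // shmctl: mark segment for removal
    return 0;
}
```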
### Memory
In the memory submenu, the eBPF plugin creates two submenus, **page cache** and **synchronization**, with the following
organization:
-- Page Cache
- - Page cache ratio
- - Dirty pages
- - Page cache hits
- - Page cache misses
-- Synchronization
- - File sync
- - Memory map sync
- - File system sync
- - File range sync
+- Page Cache
+ - Page cache ratio
+ - Dirty pages
+ - Page cache hits
+ - Page cache misses
+- Synchronization
+ - File sync
+ - Memory map sync
+ - File system sync
+ - File range sync
#### Page cache hits
@@ -587,10 +578,10 @@ The chart `cachestat_ratio` shows how processes are accessing page cache. In a n
100%, which means that the majority of the work on the machine is processed in memory. To calculate the ratio, Netdata
attaches `kprobes` for kernel functions:
-- `add_to_page_cache_lru`: Page addition.
-- `mark_page_accessed`: Access to cache.
-- `account_page_dirtied`: Dirty (modified) pages.
-- `mark_buffer_dirty`: Writes to page cache.
+- `add_to_page_cache_lru`: Page addition.
+- `mark_page_accessed`: Access to cache.
+- `account_page_dirtied`: Dirty (modified) pages.
+- `mark_buffer_dirty`: Writes to page cache.
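
These are the same counters used by the classic `cachestat` tool, and the published ratio is derived from them roughly as sketched below; the plugin's exact accounting can differ between kernel versions.

```text
total  = mark_page_accessed - mark_buffer_dirty
misses = add_to_page_cache_lru - account_page_dirtied
hits   = total - misses
ratio  = 100 * hits / total
```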
#### Page cache misses
@@ -629,7 +620,7 @@ in [disk latency](#disk) charts.
By default, MD flush is disabled. To enable it, configure your
`/etc/netdata/ebpf.d.conf` file as:
-```conf
+```text
[global]
mdflush = yes
```
@@ -638,7 +629,7 @@ By default, MD flush is disabled. To enable it, configure your
To collect data related to Linux multi-device (MD) flushing, the following kprobe is used:
-- `kprobe/md_flush_request`: called whenever a request for flushing multi-device data is made.
+- `kprobe/md_flush_request`: Called whenever a request for flushing multi-device data is made.
### Disk
@@ -648,9 +639,9 @@ The eBPF plugin also shows a chart in the Disk section when the `disk` thread is
This will create the chart `disk_latency_io` for each disk on the host. The following tracepoints are used:
-- [`block/block_rq_issue`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_issue):
+- [`block/block_rq_issue`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_issue):
IO request operation issued to a device driver.
-- [`block/block_rq_complete`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_complete):
+- [`block/block_rq_complete`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_complete):
IO operation completed by the device.
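
Conceptually, each request's latency is the interval between those two tracepoints, which the plugin accumulates into a histogram:

```text
latency = timestamp(block_rq_complete) - timestamp(block_rq_issue)
```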
Under most circumstances, disk latency is the single most important metric to focus on when it comes to storage performance.
@@ -675,10 +666,10 @@ To measure the latency of executing some actions in an
collector needs to attach `kprobes` and `kretprobes` for each of the following
functions:
-- `ext4_file_read_iter`: Function used to measure read latency.
-- `ext4_file_write_iter`: Function used to measure write latency.
-- `ext4_file_open`: Function used to measure open latency.
-- `ext4_sync_file`: Function used to measure sync latency.
+- `ext4_file_read_iter`: Function used to measure read latency.
+- `ext4_file_write_iter`: Function used to measure write latency.
+- `ext4_file_open`: Function used to measure open latency.
+- `ext4_sync_file`: Function used to measure sync latency.
#### ZFS
@@ -686,10 +677,10 @@ To measure the latency of executing some actions in a zfs filesystem, the
collector needs to attach `kprobes` and `kretprobes` for each of the following
functions:
-- `zpl_iter_read`: Function used to measure read latency.
-- `zpl_iter_write`: Function used to measure write latency.
-- `zpl_open`: Function used to measure open latency.
-- `zpl_fsync`: Function used to measure sync latency.
+- `zpl_iter_read`: Function used to measure read latency.
+- `zpl_iter_write`: Function used to measure write latency.
+- `zpl_open`: Function used to measure open latency.
+- `zpl_fsync`: Function used to measure sync latency.
#### XFS
@@ -698,10 +689,10 @@ To measure the latency of executing some actions in an
collector needs to attach `kprobes` and `kretprobes` for each of the following
functions:
-- `xfs_file_read_iter`: Function used to measure read latency.
-- `xfs_file_write_iter`: Function used to measure write latency.
-- `xfs_file_open`: Function used to measure open latency.
-- `xfs_file_fsync`: Function used to measure sync latency.
+- `xfs_file_read_iter`: Function used to measure read latency.
+- `xfs_file_write_iter`: Function used to measure write latency.
+- `xfs_file_open`: Function used to measure open latency.
+- `xfs_file_fsync`: Function used to measure sync latency.
#### NFS
@@ -710,11 +701,11 @@ To measure the latency of executing some actions in an
collector needs to attach `kprobes` and `kretprobes` for each of the following
functions:
-- `nfs_file_read`: Function used to measure read latency.
-- `nfs_file_write`: Function used to measure write latency.
-- `nfs_file_open`: Functions used to measure open latency.
-- `nfs4_file_open`: Functions used to measure open latency for NFS v4.
-- `nfs_getattr`: Function used to measure sync latency.
+- `nfs_file_read`: Function used to measure read latency.
+- `nfs_file_write`: Function used to measure write latency.
+- `nfs_file_open`: Function used to measure open latency.
+- `nfs4_file_open`: Function used to measure open latency for NFS v4.
+- `nfs_getattr`: Function used to measure sync latency.
#### btrfs
@@ -724,24 +715,24 @@ filesystem, the collector needs to attach `kprobes` and `kretprobes` for each of
> Note: We are listing two functions used to measure `read` latency, but we use either `btrfs_file_read_iter` or
> `generic_file_read_iter`, depending on kernel version.
-- `btrfs_file_read_iter`: Function used to measure read latency since kernel `5.10.0`.
-- `generic_file_read_iter`: Like `btrfs_file_read_iter`, but this function was used before kernel `5.10.0`.
-- `btrfs_file_write_iter`: Function used to write data.
-- `btrfs_file_open`: Function used to open files.
-- `btrfs_sync_file`: Function used to synchronize data to filesystem.
+- `btrfs_file_read_iter`: Function used to measure read latency since kernel `5.10.0`.
+- `generic_file_read_iter`: Like `btrfs_file_read_iter`, but this function was used before kernel `5.10.0`.
+- `btrfs_file_write_iter`: Function used to write data.
+- `btrfs_file_open`: Function used to open files.
+- `btrfs_sync_file`: Function used to synchronize data to the filesystem.
#### File descriptor
To provide metrics related to `open` and `close` events, instead of attaching kprobes to each individual syscall, the
collector attaches `kprobes` to the common functions those syscalls use:
-- [`do_sys_open`](https://0xax.gitbooks.io/linux-insides/content/SysCall/linux-syscall-5.html): Internal function used to
+- [`do_sys_open`](https://0xax.gitbooks.io/linux-insides/content/SysCall/linux-syscall-5.html): Internal function used to
open files.
-- [`do_sys_openat2`](https://elixir.bootlin.com/linux/v5.6/source/fs/open.c#L1162):
+- [`do_sys_openat2`](https://elixir.bootlin.com/linux/v5.6/source/fs/open.c#L1162):
Function called from `do_sys_open` since version `5.6.0`.
-- [`close_fd`](https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2271761.html): Function used to close file
+- [`close_fd`](https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2271761.html): Function used to close file
descriptors since kernel `5.11.0`.
-- `__close_fd`: Function used to close files before version `5.11.0`.
+- `__close_fd`: Function used to close files before version `5.11.0`.
#### File error
@@ -761,21 +752,21 @@ To measure the latency and total quantity of executing some VFS-level
functions, ebpf.plugin needs to attach kprobes and kretprobes for each of the
following functions:
-- `vfs_write`: Function used monitoring the number of successful & failed
+- `vfs_write`: Function used for monitoring the number of successful & failed
filesystem write calls, as well as the total number of written bytes.
-- `vfs_writev`: Same function as `vfs_write` but for vector writes (i.e. a
+- `vfs_writev`: Same function as `vfs_write` but for vector writes (i.e. a
single write operation using a group of buffers rather than 1).
-- `vfs_read`: Function used for monitoring the number of successful & failed
+- `vfs_read`: Function used for monitoring the number of successful & failed
filesystem read calls, as well as the total number of read bytes.
-- `vfs_readv` Same function as `vfs_read` but for vector reads (i.e. a single
+- `vfs_readv`: Same function as `vfs_read`, but for vector reads (i.e. a single
read operation using a group of buffers rather than 1).
-- `vfs_unlink`: Function used for monitoring the number of successful & failed
+- `vfs_unlink`: Function used for monitoring the number of successful & failed
filesystem unlink calls.
-- `vfs_fsync`: Function used for monitoring the number of successful & failed
+- `vfs_fsync`: Function used for monitoring the number of successful & failed
filesystem fsync calls.
-- `vfs_open`: Function used for monitoring the number of successful & failed
+- `vfs_open`: Function used for monitoring the number of successful & failed
filesystem open calls.
-- `vfs_create`: Function used for monitoring the number of successful & failed
+- `vfs_create`: Function used for monitoring the number of successful & failed
filesystem create calls.
##### VFS Deleted objects
@@ -816,8 +807,8 @@ Metrics for directory cache are collected using kprobe for `lookup_fast`, becaus
times this function is accessed. On the other hand, for `d_lookup` we are not only interested in the number of times it
is accessed, but also in possible errors, so we need to attach a `kretprobe`. For this reason, the following is used:
-- [`lookup_fast`](https://lwn.net/Articles/649115/): Called to look at data inside the directory cache.
-- [`d_lookup`](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/dcache.c?id=052b398a43a7de8c68c13e7fa05d6b3d16ce6801#n2223):
+- [`lookup_fast`](https://lwn.net/Articles/649115/): Called to look at data inside the directory cache.
+- [`d_lookup`](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/dcache.c?id=052b398a43a7de8c68c13e7fa05d6b3d16ce6801#n2223):
Called when the desired file is not inside the directory cache.
##### Directory Cache Interpretation
@@ -830,8 +821,8 @@ accessed before.
The following syscalls are traced to collect `mount` & `umount` call counts:
-- [`mount`](https://man7.org/linux/man-pages/man2/mount.2.html): mount filesystem on host.
-- [`umount`](https://man7.org/linux/man-pages/man2/umount.2.html): umount filesystem on host.
+- [`mount`](https://man7.org/linux/man-pages/man2/mount.2.html): Mounts a filesystem on the host.
+- [`umount`](https://man7.org/linux/man-pages/man2/umount.2.html): Unmounts a filesystem from the host.
### Networking Stack
@@ -855,10 +846,10 @@ to send & receive data and to close connections when `TCP` protocol is used.
This chart shows calls to the following functions:
-- `tcp_sendmsg`: Function responsible to send data for a specified destination.
-- `tcp_cleanup_rbuf`: We use this function instead of `tcp_recvmsg`, because the last one misses `tcp_read_sock` traffic
+- `tcp_sendmsg`: Function responsible for sending data to a specified destination.
+- `tcp_cleanup_rbuf`: We use this function instead of `tcp_recvmsg`, because the latter misses `tcp_read_sock` traffic
and we would also need to add more `tracing` to get the socket and packet size.
-- `tcp_close`: Function responsible to close connection.
+- `tcp_close`: Function responsible for closing the connection.
#### TCP retransmit
@@ -881,7 +872,7 @@ calls, it monitors the number of bytes sent and received.
These are tracepoints related to [OOM](https://en.wikipedia.org/wiki/Out_of_memory) killing processes.
-- `oom/mark_victim`: Monitors when an oomkill event happens.
+- `oom/mark_victim`: Monitors when an oomkill event happens.
## Known issues
@@ -897,15 +888,14 @@ node is experiencing high memory usage and there is no obvious culprit to be fou
- Disable [integration with apps](#integration-with-appsplugin).
- Disable [integration with cgroup](#integration-with-cgroupsplugin).
-If with these changes you still suspect eBPF using too much memory, and there is no obvious culprit to be found
+If you still suspect eBPF is using too much memory after these changes, and there is no obvious culprit to be found
in the `apps.mem` chart, consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuring-ebpfplugin).
-Next, [restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) with
-`sudo systemctl restart netdata` to see if system memory usage (see the `system.ram` chart) has dropped significantly.
+Next, [restart Netdata](/docs/netdata-agent/start-stop-restart.md) to see if system memory usage (see the `system.ram` chart) has dropped significantly.
Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#pid-table-size)
in `ebpf.conf`.
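
For reference, a sketch of that setting (the value shown is illustrative; check the defaults in your own configuration file):

```text
[global]
    pid table size = 32768
```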
-The total memory usage is a well known [issue](https://lore.kernel.org/all/167821082315.1693.6957546778534183486.git-patchwork-notify@kernel.org/)
+The total memory usage is a well-known [issue](https://lore.kernel.org/all/167821082315.1693.6957546778534183486.git-patchwork-notify@kernel.org/)
for eBPF; it is not a bug in the plugin.
### SELinux
@@ -950,7 +940,7 @@ This will create two new files: `netdata_ebpf.te` and `netdata_ebpf.mod`.
Edit the `netdata_ebpf.te` file to change the options `class` and `allow`. You should have the following at the end of
the `netdata_ebpf.te` file.
-```conf
+```text
module netdata_ebpf 1.0;
require {
type unconfined_service_t;
@@ -981,7 +971,7 @@ a feature called "lockdown," which may affect `ebpf.plugin` depending how the ke
shows how the lockdown module impacts `ebpf.plugin` based on the selected options:
| Enforcing kernel lockdown | Enable lockdown LSM early in init | Default lockdown mode | Can `ebpf.plugin` run with this? |
-| :------------------------ | :-------------------------------- | :-------------------- | :------------------------------- |
+|:--------------------------|:----------------------------------|:----------------------|:---------------------------------|
| YES | NO | NO | YES |
| YES | Yes | None | YES |
| YES | Yes | Integrity | YES |
diff --git a/src/collectors/ebpf.plugin/ebpf.c b/src/collectors/ebpf.plugin/ebpf.c
index 5424ea8f0..4cc263e73 100644
--- a/src/collectors/ebpf.plugin/ebpf.c
+++ b/src/collectors/ebpf.plugin/ebpf.c
@@ -19,11 +19,7 @@ char *ebpf_plugin_dir = PLUGINS_DIR;
static char *ebpf_configured_log_dir = LOG_DIR;
char *ebpf_algorithms[] = { EBPF_CHART_ALGORITHM_ABSOLUTE, EBPF_CHART_ALGORITHM_INCREMENTAL};
-struct config collector_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config collector_config = APPCONFIG_INITIALIZER;
int running_on_kernel = 0;
int ebpf_nprocs;
@@ -661,7 +657,7 @@ struct vfs_bpf *vfs_bpf_obj = NULL;
#else
void *default_btf = NULL;
#endif
-char *btf_path = NULL;
+const char *btf_path = NULL;
/*****************************************************************
*
@@ -1415,7 +1411,7 @@ void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em)
char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };
- struct aral_statistics *stats = aral_statistics(memory);
+ struct aral_statistics *stats = aral_get_statistics(memory);
ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_usage, "");
write_chart_dimension(mem, (long long)stats->structures.allocated_bytes);
@@ -1608,7 +1604,7 @@ static void get_ipv6_last_addr(union netdata_ip_t *out, union netdata_ip_t *in,
*
* @return it returns 0 on success and -1 otherwise.
*/
-static inline int ebpf_ip2nl(uint8_t *dst, char *ip, int domain, char *source)
+static inline int ebpf_ip2nl(uint8_t *dst, const char *ip, int domain, char *source)
{
if (inet_pton(domain, ip, dst) <= 0) {
netdata_log_error("The address specified (%s) is invalid ", source);
@@ -1666,14 +1662,14 @@ void ebpf_clean_ip_structure(ebpf_network_viewer_ip_list_t **clean)
* @param out a pointer to store the link list
* @param ip the value given as parameter
*/
-static void ebpf_parse_ip_list_unsafe(void **out, char *ip)
+static void ebpf_parse_ip_list_unsafe(void **out, const char *ip)
{
ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out;
char *ipdup = strdupz(ip);
union netdata_ip_t first = { };
union netdata_ip_t last = { };
- char *is_ipv6;
+ const char *is_ipv6;
if (*ip == '*' && *(ip+1) == '\0') {
memset(first.addr8, 0, sizeof(first.addr8));
memset(last.addr8, 0xFF, sizeof(last.addr8));
@@ -1684,7 +1680,8 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip)
goto storethisip;
}
- char *end = ip;
+ char *enddup = strdupz(ip);
+ char *end = enddup;
// Move while I cannot find a separator
while (*end && *end != '/' && *end != '-') end++;
@@ -1814,7 +1811,7 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip)
ebpf_network_viewer_ip_list_t *store;
- storethisip:
+storethisip:
store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
store->value = ipdup;
store->hash = simple_hash(ipdup);
@@ -1825,8 +1822,9 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip)
ebpf_fill_ip_list_unsafe(list, store, "socket");
return;
- cleanipdup:
+cleanipdup:
freez(ipdup);
+ freez(enddup);
}
/**
@@ -1836,7 +1834,7 @@ static void ebpf_parse_ip_list_unsafe(void **out, char *ip)
*
* @param ptr is a pointer with the text to parse.
*/
-void ebpf_parse_ips_unsafe(char *ptr)
+void ebpf_parse_ips_unsafe(const char *ptr)
{
// No value
if (unlikely(!ptr))
@@ -1927,7 +1925,7 @@ static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_ne
* @param out a pointer to store the link list
* @param service the service used to create the structure that will be linked.
*/
-static void ebpf_parse_service_list(void **out, char *service)
+static void ebpf_parse_service_list(void **out, const char *service)
{
ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
struct servent *serv = getservbyname((const char *)service, "tcp");
@@ -1956,8 +1954,10 @@ static void ebpf_parse_service_list(void **out, char *service)
* @param out a pointer to store the link list
* @param range the informed range for the user.
*/
-static void ebpf_parse_port_list(void **out, char *range)
-{
+static void ebpf_parse_port_list(void **out, const char *range_param) {
+ char range[strlen(range_param) + 1];
+ strncpyz(range, range_param, strlen(range_param));
+
int first, last;
ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
@@ -2029,7 +2029,7 @@ static void ebpf_parse_port_list(void **out, char *range)
*
* @param ptr is a pointer with the text to parse.
*/
-void ebpf_parse_ports(char *ptr)
+void ebpf_parse_ports(const char *ptr)
{
// No value
if (unlikely(!ptr))
@@ -2480,7 +2480,7 @@ static void ebpf_link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_n
* @param out is the output link list
* @param parse is a pointer with the text to parser.
*/
-static void ebpf_link_hostnames(char *parse)
+static void ebpf_link_hostnames(const char *parse)
{
// No value
if (unlikely(!parse))
@@ -2536,7 +2536,7 @@ void parse_network_viewer_section(struct config *cfg)
EBPF_CONFIG_RESOLVE_SERVICE,
CONFIG_BOOLEAN_YES);
- char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL);
+ const char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL);
ebpf_parse_ports(value);
if (network_viewer_opt.hostname_resolution_enabled) {
@@ -2684,7 +2684,7 @@ static void ebpf_allocate_common_vectors()
*
* @param ptr the option given by users
*/
-static inline void ebpf_how_to_load(char *ptr)
+static inline void ebpf_how_to_load(const char *ptr)
{
if (!strcasecmp(ptr, EBPF_CFG_LOAD_MODE_RETURN))
ebpf_set_thread_mode(MODE_RETURN);
@@ -2775,7 +2775,7 @@ static inline void ebpf_set_load_mode(netdata_ebpf_load_mode_t load, netdata_ebp
* @param str value read from configuration file.
* @param origin specify the configuration file loaded
*/
-static inline void epbf_update_load_mode(char *str, netdata_ebpf_load_mode_t origin)
+static inline void epbf_update_load_mode(const char *str, netdata_ebpf_load_mode_t origin)
{
netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(str);
@@ -2808,7 +2808,7 @@ static void read_collector_values(int *disable_cgroups,
int update_every, netdata_ebpf_load_mode_t origin)
{
// Read global section
- char *value;
+ const char *value;
if (appconfig_exists(&collector_config, EBPF_GLOBAL_SECTION, "load")) // Backward compatibility
value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "load",
EBPF_CFG_LOAD_MODE_DEFAULT);
@@ -4005,7 +4005,6 @@ static void ebpf_manage_pid(pid_t pid)
*/
int main(int argc, char **argv)
{
- clocks_init();
nd_log_initialize_for_external_plugins(NETDATA_EBPF_PLUGIN_NAME);
ebpf_set_global_variables();
@@ -4034,6 +4033,10 @@ int main(int argc, char **argv)
#ifdef LIBBPF_MAJOR_VERSION
libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
+#ifndef NETDATA_INTERNAL_CHECKS
+ libbpf_set_print(netdata_silent_libbpf_vfprintf);
+#endif
#endif
ebpf_read_local_addresses_unsafe();
@@ -4072,16 +4075,14 @@ int main(int argc, char **argv)
}
}
- usec_t step = USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
- uint32_t max_period = EBPF_CLEANUP_FACTOR;
int update_apps_list = update_apps_every - 1;
int process_maps_per_core = ebpf_modules[EBPF_MODULE_PROCESS_IDX].maps_per_core;
//Plugin will be killed when it receives a signal
for ( ; !ebpf_plugin_stop(); global_iterations_counter++) {
- (void)heartbeat_next(&hb, step);
+ (void)heartbeat_next(&hb);
if (global_iterations_counter % EBPF_DEFAULT_UPDATE_EVERY == 0) {
pthread_mutex_lock(&lock);
@@ -4099,7 +4100,7 @@ int main(int argc, char **argv)
pthread_mutex_lock(&collect_data_mutex);
ebpf_parse_proc_files();
if (collect_pids & (1<<EBPF_MODULE_PROCESS_IDX)) {
- collect_data_for_all_processes(process_pid_fd, process_maps_per_core, max_period);
+ collect_data_for_all_processes(process_pid_fd, process_maps_per_core);
}
ebpf_create_apps_charts(apps_groups_root_target);
diff --git a/src/collectors/ebpf.plugin/ebpf_apps.c b/src/collectors/ebpf.plugin/ebpf_apps.c
index d90c5f128..dc66cf774 100644
--- a/src/collectors/ebpf.plugin/ebpf_apps.c
+++ b/src/collectors/ebpf.plugin/ebpf_apps.c
@@ -327,7 +327,7 @@ int pids_fd[EBPF_PIDS_END_IDX];
static size_t
// global_iterations_counter = 1,
- calls_counter = 0,
+ //calls_counter = 0,
// file_counter = 0,
// filenames_allocated_counter = 0,
// inodes_changed_counter = 0,
@@ -426,7 +426,7 @@ static inline void assign_target_to_pid(ebpf_pid_data_t *p)
static inline int read_proc_pid_cmdline(ebpf_pid_data_t *p, char *cmdline)
{
char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "%s/proc/%d/cmdline", netdata_configured_host_prefix, p->pid);
+ snprintfz(filename, FILENAME_MAX, "%s/proc/%u/cmdline", netdata_configured_host_prefix, p->pid);
int ret = 0;
@@ -490,7 +490,7 @@ static inline int read_proc_pid_stat(ebpf_pid_data_t *p)
char *comm = procfile_lineword(ff, 0, 1);
int32_t ppid = (int32_t)str2pid_t(procfile_lineword(ff, 0, 3));
- if (p->ppid == ppid && p->target)
+ if (p->ppid == (uint32_t)ppid && p->target)
goto without_cmdline_target;
p->ppid = ppid;
@@ -546,7 +546,7 @@ static inline int ebpf_collect_data_for_pid(pid_t pid)
read_proc_pid_stat(p);
// check its parent pid
- if (unlikely( p->ppid > pid_max)) {
+ if (unlikely( p->ppid > (uint32_t)pid_max)) {
netdata_log_error("Pid %d (command '%s') states invalid parent pid %u. Using 0.", pid, p->comm, p->ppid);
p->ppid = 0;
}
@@ -906,9 +906,8 @@ void ebpf_process_sum_values_for_pids(ebpf_process_stat_t *process, struct ebpf_
*
* @param tbl_pid_stats_fd The mapped file descriptor for the hash table.
* @param maps_per_core do I have hash maps per core?
- * @param max_period max period to wait before remove from hash table.
*/
-void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core, uint32_t max_period)
+void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core)
{
if (tbl_pid_stats_fd == -1)
return;
diff --git a/src/collectors/ebpf.plugin/ebpf_apps.h b/src/collectors/ebpf.plugin/ebpf_apps.h
index 98c9995da..5bf8953ad 100644
--- a/src/collectors/ebpf.plugin/ebpf_apps.h
+++ b/src/collectors/ebpf.plugin/ebpf_apps.h
@@ -495,7 +495,7 @@ int ebpf_read_hash_table(void *ep, int fd, uint32_t pid);
int get_pid_comm(pid_t pid, size_t n, char *dest);
-void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core, uint32_t max_period);
+void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core);
void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core);
// The default value is at least 32 times smaller than maximum number of PIDs allowed on system,
diff --git a/src/collectors/ebpf.plugin/ebpf_cachestat.c b/src/collectors/ebpf.plugin/ebpf_cachestat.c
index 8c0260d51..49a5d98a1 100644
--- a/src/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/src/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -43,11 +43,7 @@ ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input =
#endif
}};
-struct config cachestat_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config cachestat_config = APPCONFIG_INITIALIZER;
netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "mark_page_accessed", .mode = EBPF_LOAD_TRAMPOLINE},
@@ -716,9 +712,8 @@ static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, n
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
- * @param max_period limit of iterations without updates before remove data from hash table
*/
-static void ebpf_read_cachestat_apps_table(int maps_per_core, uint32_t max_period)
+static void ebpf_read_cachestat_apps_table(int maps_per_core)
{
netdata_cachestat_pid_t *cv = cachestat_vector;
int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
@@ -842,28 +837,25 @@ void ebpf_resume_apps_data()
*/
void *ebpf_read_cachestat_thread(void *ptr)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
ebpf_module_t *em = (ebpf_module_t *)ptr;
int maps_per_core = em->maps_per_core;
int update_every = em->update_every;
- uint32_t max_period = EBPF_CLEANUP_FACTOR;
int counter = update_every - 1;
uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
- usec_t period = update_every * USEC_PER_SEC;
pids_fd[EBPF_PIDS_CACHESTAT_IDX] = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
+ heartbeat_t hb;
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, period);
+ (void)heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
pthread_mutex_lock(&collect_data_mutex);
- ebpf_read_cachestat_apps_table(maps_per_core, max_period);
+ ebpf_read_cachestat_apps_table(maps_per_core);
ebpf_resume_apps_data();
pthread_mutex_unlock(&collect_data_mutex);
@@ -1407,7 +1399,7 @@ static void cachestat_collector(ebpf_module_t *em)
int update_every = em->update_every;
int maps_per_core = em->maps_per_core;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
int counter = update_every - 1;
//This will be cancelled by its parent
uint32_t running_time = 0;
@@ -1415,7 +1407,7 @@ static void cachestat_collector(ebpf_module_t *em)
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ (void)heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_cgroup.c b/src/collectors/ebpf.plugin/ebpf_cgroup.c
index 9e1fa8231..0bc5989e1 100644
--- a/src/collectors/ebpf.plugin/ebpf_cgroup.c
+++ b/src/collectors/ebpf.plugin/ebpf_cgroup.c
@@ -373,13 +373,12 @@ void ebpf_create_charts_on_systemd(ebpf_systemd_args_t *chart)
*/
void *ebpf_cgroup_integration(void *ptr __maybe_unused)
{
- usec_t step = USEC_PER_SEC;
int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
//Plugin will be killed when it receives a signal
while (!ebpf_plugin_stop()) {
- (void)heartbeat_next(&hb, step);
+ heartbeat_next(&hb);
// We are using a small heartbeat time to wake up thread,
// but we should not update so frequently the shared memory data
diff --git a/src/collectors/ebpf.plugin/ebpf_dcstat.c b/src/collectors/ebpf.plugin/ebpf_dcstat.c
index e6053cb4a..e84517686 100644
--- a/src/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/src/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -12,11 +12,7 @@ netdata_dcstat_pid_t *dcstat_vector = NULL;
static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END];
static netdata_idx_t *dcstat_values = NULL;
-struct config dcstat_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config dcstat_config = APPCONFIG_INITIALIZER;
ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
@@ -542,9 +538,8 @@ static void ebpf_dcstat_apps_accumulator(netdata_dcstat_pid_t *out, int maps_per
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
- * @param max_period limit of iterations without updates before remove data from hash table
*/
-static void ebpf_read_dc_apps_table(int maps_per_core, uint32_t max_period)
+static void ebpf_read_dc_apps_table(int maps_per_core)
{
netdata_dcstat_pid_t *cv = dcstat_vector;
int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
@@ -644,9 +639,6 @@ void ebpf_dc_resume_apps_data()
*/
void *ebpf_read_dcstat_thread(void *ptr)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
ebpf_module_t *em = (ebpf_module_t *)ptr;
int maps_per_core = em->maps_per_core;
@@ -659,16 +651,16 @@ void *ebpf_read_dcstat_thread(void *ptr)
uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
- usec_t period = update_every * USEC_PER_SEC;
- uint32_t max_period = EBPF_CLEANUP_FACTOR;
pids_fd[EBPF_PIDS_DCSTAT_IDX] = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
+ heartbeat_t hb;
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, period);
+ (void)heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
pthread_mutex_lock(&collect_data_mutex);
- ebpf_read_dc_apps_table(maps_per_core, max_period);
+ ebpf_read_dc_apps_table(maps_per_core);
ebpf_dc_resume_apps_data();
pthread_mutex_unlock(&collect_data_mutex);
@@ -1271,7 +1263,7 @@ static void dcstat_collector(ebpf_module_t *em)
int cgroups = em->cgroup_charts;
int update_every = em->update_every;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
@@ -1279,7 +1271,7 @@ static void dcstat_collector(ebpf_module_t *em)
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_disk.c b/src/collectors/ebpf.plugin/ebpf_disk.c
index 246f98702..3d9c5789c 100644
--- a/src/collectors/ebpf.plugin/ebpf_disk.c
+++ b/src/collectors/ebpf.plugin/ebpf_disk.c
@@ -6,11 +6,7 @@
#include "ebpf.h"
#include "ebpf_disk.h"
-struct config disk_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config disk_config = APPCONFIG_INITIALIZER;
static ebpf_local_maps_t disk_maps[] = {{.name = "tbl_disk_iocall", .internal_input = NETDATA_DISK_HISTOGRAM_LENGTH,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
@@ -775,13 +771,13 @@ static void disk_collector(ebpf_module_t *em)
int update_every = em->update_every;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_fd.c b/src/collectors/ebpf.plugin/ebpf_fd.c
index 61a9595cc..256efa4fe 100644
--- a/src/collectors/ebpf.plugin/ebpf_fd.c
+++ b/src/collectors/ebpf.plugin/ebpf_fd.c
@@ -46,9 +46,7 @@ static ebpf_local_maps_t fd_maps[] = {{.name = "tbl_fd_pid", .internal_input = N
}};
-struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config fd_config = APPCONFIG_INITIALIZER;
static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER];
static netdata_idx_t *fd_values = NULL;
@@ -683,9 +681,8 @@ static void fd_apps_accumulator(netdata_fd_stat_t *out, int maps_per_core)
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
- * @param max_period limit of iterations without updates before remove data from hash table
*/
-static void ebpf_read_fd_apps_table(int maps_per_core, uint32_t max_period)
+static void ebpf_read_fd_apps_table(int maps_per_core)
{
netdata_fd_stat_t *fv = fd_vector;
int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
@@ -783,9 +780,6 @@ void ebpf_fd_resume_apps_data()
*/
void *ebpf_read_fd_thread(void *ptr)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
ebpf_module_t *em = (ebpf_module_t *)ptr;
int maps_per_core = em->maps_per_core;
@@ -798,16 +792,17 @@ void *ebpf_read_fd_thread(void *ptr)
uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
- int period = USEC_PER_SEC;
- uint32_t max_period = EBPF_CLEANUP_FACTOR;
pids_fd[EBPF_PIDS_FD_IDX] = fd_maps[NETDATA_FD_PID_STATS].map_fd;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, period);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
pthread_mutex_lock(&collect_data_mutex);
- ebpf_read_fd_apps_table(maps_per_core, max_period);
+ ebpf_read_fd_apps_table(maps_per_core);
ebpf_fd_resume_apps_data();
pthread_mutex_unlock(&collect_data_mutex);
@@ -1217,8 +1212,6 @@ static void ebpf_fd_send_cgroup_data(ebpf_module_t *em)
static void fd_collector(ebpf_module_t *em)
{
int cgroups = em->cgroup_charts;
- heartbeat_t hb;
- heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
@@ -1226,8 +1219,10 @@ static void fd_collector(ebpf_module_t *em)
uint32_t lifetime = em->lifetime;
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_filesystem.c b/src/collectors/ebpf.plugin/ebpf_filesystem.c
index 1187b03e9..30f3c7460 100644
--- a/src/collectors/ebpf.plugin/ebpf_filesystem.c
+++ b/src/collectors/ebpf.plugin/ebpf_filesystem.c
@@ -2,11 +2,7 @@
#include "ebpf_filesystem.h"
-struct config fs_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config fs_config = APPCONFIG_INITIALIZER;
ebpf_local_maps_t ext4_maps[] = {{.name = "tbl_ext4", .internal_input = NETDATA_KEY_CALLS_SYNC,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
@@ -984,13 +980,13 @@ static void ebpf_histogram_send_data()
static void filesystem_collector(ebpf_module_t *em)
{
int update_every = em->update_every;
- heartbeat_t hb;
- heartbeat_init(&hb);
int counter = update_every - 1;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_functions.c b/src/collectors/ebpf.plugin/ebpf_functions.c
index 8e9fb01ed..267159a40 100644
--- a/src/collectors/ebpf.plugin/ebpf_functions.c
+++ b/src/collectors/ebpf.plugin/ebpf_functions.c
@@ -287,7 +287,7 @@ static void ebpf_function_socket_manipulation(const char *transaction,
ebpf_module_t *em = &ebpf_modules[EBPF_MODULE_SOCKET_IDX];
char *words[PLUGINSD_MAX_WORDS] = {NULL};
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS);
+ size_t num_words = quoted_strings_splitter_whitespace(function, words, PLUGINSD_MAX_WORDS);
const char *name;
int period = -1;
rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
@@ -712,9 +712,9 @@ void *ebpf_function_thread(void *ptr)
pthread_mutex_unlock(&lock);
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
while(!ebpf_plugin_stop()) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop()) {
break;
diff --git a/src/collectors/ebpf.plugin/ebpf_hardirq.c b/src/collectors/ebpf.plugin/ebpf_hardirq.c
index 911425e54..e7974ac05 100644
--- a/src/collectors/ebpf.plugin/ebpf_hardirq.c
+++ b/src/collectors/ebpf.plugin/ebpf_hardirq.c
@@ -3,11 +3,7 @@
#include "ebpf.h"
#include "ebpf_hardirq.h"
-struct config hardirq_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config hardirq_config = APPCONFIG_INITIALIZER;
static ebpf_local_maps_t hardirq_maps[] = {
{
@@ -575,15 +571,15 @@ static void hardirq_collector(ebpf_module_t *em)
pthread_mutex_unlock(&lock);
// loop and read from published data until ebpf plugin is closed.
- heartbeat_t hb;
- heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
//This will be cancelled by its parent
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_mdflush.c b/src/collectors/ebpf.plugin/ebpf_mdflush.c
index 77c109bff..3d70b7792 100644
--- a/src/collectors/ebpf.plugin/ebpf_mdflush.c
+++ b/src/collectors/ebpf.plugin/ebpf_mdflush.c
@@ -3,11 +3,7 @@
#include "ebpf.h"
#include "ebpf_mdflush.h"
-struct config mdflush_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config mdflush_config = APPCONFIG_INITIALIZER;
#define MDFLUSH_MAP_COUNT 0
static ebpf_local_maps_t mdflush_maps[] = {
@@ -341,14 +337,14 @@ static void mdflush_collector(ebpf_module_t *em)
pthread_mutex_unlock(&lock);
// loop and read from published data until ebpf plugin is closed.
- heartbeat_t hb;
- heartbeat_init(&hb);
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_mount.c b/src/collectors/ebpf.plugin/ebpf_mount.c
index 7441cc6e2..4e310c8a6 100644
--- a/src/collectors/ebpf.plugin/ebpf_mount.c
+++ b/src/collectors/ebpf.plugin/ebpf_mount.c
@@ -22,9 +22,7 @@ static char *mount_dimension_name[NETDATA_EBPF_MOUNT_SYSCALL] = { "mount", "umou
static netdata_syscall_stat_t mount_aggregated_data[NETDATA_EBPF_MOUNT_SYSCALL];
static netdata_publish_syscall_t mount_publish_aggregated[NETDATA_EBPF_MOUNT_SYSCALL];
-struct config mount_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config mount_config = APPCONFIG_INITIALIZER;
static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END];
@@ -363,15 +361,15 @@ static void mount_collector(ebpf_module_t *em)
{
memset(mount_hash_values, 0, sizeof(mount_hash_values));
- heartbeat_t hb;
- heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_oomkill.c b/src/collectors/ebpf.plugin/ebpf_oomkill.c
index 34361550b..d32095abc 100644
--- a/src/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/src/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -3,11 +3,7 @@
#include "ebpf.h"
#include "ebpf_oomkill.h"
-struct config oomkill_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config oomkill_config = APPCONFIG_INITIALIZER;
#define OOMKILL_MAP_KILLCNT 0
static ebpf_local_maps_t oomkill_maps[] = {
@@ -463,14 +459,14 @@ static void oomkill_collector(ebpf_module_t *em)
memset(keys, 0, sizeof(keys));
// loop and read until ebpf plugin is closed.
- heartbeat_t hb;
- heartbeat_init(&hb);
int counter = update_every - 1;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
netdata_idx_t *stats = em->hash_table_stats;
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ (void)heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_process.c b/src/collectors/ebpf.plugin/ebpf_process.c
index d2810f899..d80f7a3e8 100644
--- a/src/collectors/ebpf.plugin/ebpf_process.c
+++ b/src/collectors/ebpf.plugin/ebpf_process.c
@@ -57,11 +57,7 @@ ebpf_process_stat_t *process_stat_vector = NULL;
static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END];
static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END];
-struct config process_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config process_config = APPCONFIG_INITIALIZER;
/*****************************************************************
*
@@ -1124,8 +1120,6 @@ void ebpf_process_update_cgroup_algorithm()
*/
static void process_collector(ebpf_module_t *em)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
int publish_global = em->global_charts;
int cgroups = em->cgroup_charts;
pthread_mutex_lock(&ebpf_exit_cleanup);
@@ -1141,9 +1135,11 @@ static void process_collector(ebpf_module_t *em)
uint32_t lifetime = em->lifetime;
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- usec_t dt = heartbeat_next(&hb, USEC_PER_SEC);
- (void)dt;
+ heartbeat_next(&hb);
+
if (ebpf_plugin_stop())
break;
diff --git a/src/collectors/ebpf.plugin/ebpf_shm.c b/src/collectors/ebpf.plugin/ebpf_shm.c
index ac44549b2..6282a2547 100644
--- a/src/collectors/ebpf.plugin/ebpf_shm.c
+++ b/src/collectors/ebpf.plugin/ebpf_shm.c
@@ -12,11 +12,7 @@ netdata_ebpf_shm_t *shm_vector = NULL;
static netdata_idx_t shm_hash_values[NETDATA_SHM_END];
static netdata_idx_t *shm_values = NULL;
-struct config shm_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config shm_config = APPCONFIG_INITIALIZER;
static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0,
@@ -569,9 +565,8 @@ static void ebpf_update_shm_cgroup()
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
- * @param max_period limit of iterations without updates before remove data from hash table
*/
-static void ebpf_read_shm_apps_table(int maps_per_core, uint32_t max_period)
+static void ebpf_read_shm_apps_table(int maps_per_core)
{
netdata_ebpf_shm_t *cv = shm_vector;
int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
@@ -1063,9 +1058,6 @@ void ebpf_shm_resume_apps_data() {
*/
void *ebpf_read_shm_thread(void *ptr)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
ebpf_module_t *em = (ebpf_module_t *)ptr;
int maps_per_core = em->maps_per_core;
@@ -1078,16 +1070,16 @@ void *ebpf_read_shm_thread(void *ptr)
uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
- usec_t period = update_every * USEC_PER_SEC;
- uint32_t max_period = EBPF_CLEANUP_FACTOR;
pids_fd[EBPF_PIDS_SHM_IDX] = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
+ heartbeat_t hb;
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, period);
+ (void)heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
pthread_mutex_lock(&collect_data_mutex);
- ebpf_read_shm_apps_table(maps_per_core, max_period);
+ ebpf_read_shm_apps_table(maps_per_core);
ebpf_shm_resume_apps_data();
pthread_mutex_unlock(&collect_data_mutex);
@@ -1113,16 +1105,17 @@ static void shm_collector(ebpf_module_t *em)
{
int cgroups = em->cgroup_charts;
int update_every = em->update_every;
- heartbeat_t hb;
- heartbeat_init(&hb);
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
+
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_socket.c b/src/collectors/ebpf.plugin/ebpf_socket.c
index 5b87a3256..f0d376f43 100644
--- a/src/collectors/ebpf.plugin/ebpf_socket.c
+++ b/src/collectors/ebpf.plugin/ebpf_socket.c
@@ -77,11 +77,7 @@ netdata_socket_t *socket_values;
ebpf_network_viewer_port_list_t *listen_ports = NULL;
ebpf_addresses_t tcp_v6_connect_address = {.function = "tcp_v6_connect", .hash = 0, .addr = 0, .type = 0};
-struct config socket_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config socket_config = APPCONFIG_INITIALIZER;
netdata_ebpf_targets_t socket_targets[] = { {.name = "inet_csk_accept", .mode = EBPF_LOAD_PROBE},
{.name = "tcp_retransmit_skb", .mode = EBPF_LOAD_PROBE},
@@ -1815,9 +1811,6 @@ void ebpf_socket_resume_apps_data()
*/
void *ebpf_read_socket_thread(void *ptr)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_update_array_vectors(em);
@@ -1830,9 +1823,10 @@ void *ebpf_read_socket_thread(void *ptr)
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
- usec_t period = update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, period);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
@@ -2612,9 +2606,6 @@ static void ebpf_socket_send_cgroup_data(int update_every)
*/
static void socket_collector(ebpf_module_t *em)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
int cgroups = em->cgroup_charts;
if (cgroups)
ebpf_socket_update_cgroup_algorithm();
@@ -2627,8 +2618,10 @@ static void socket_collector(ebpf_module_t *em)
uint32_t lifetime = em->lifetime;
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
@@ -2708,7 +2701,7 @@ static void ebpf_socket_initialize_global_vectors()
* @param hash the calculated hash for the dimension name.
* @param name the dimension name.
*/
-static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value)
+static void ebpf_link_dimension_name(const char *port, uint32_t hash, const char *value)
{
int test = str2i(port);
if (test < NETDATA_MINIMUM_PORT_VALUE || test > NETDATA_MAXIMUM_PORT_VALUE){
@@ -2753,15 +2746,15 @@ static void ebpf_link_dimension_name(char *port, uint32_t hash, char *value)
*
* @param cfg the configuration structure
*/
+
+static bool config_service_value_cb(void *data __maybe_unused, const char *name, const char *value) {
+ ebpf_link_dimension_name(name, simple_hash(name), value);
+ return true;
+}
+
void ebpf_parse_service_name_section(struct config *cfg)
{
- struct section *co = appconfig_get_section(cfg, EBPF_SERVICE_NAME_SECTION);
- if (co) {
- struct config_option *cv;
- for (cv = co->values; cv ; cv = cv->next) {
- ebpf_link_dimension_name(cv->name, cv->hash, cv->value);
- }
- }
+ appconfig_foreach_value_in_section(cfg, EBPF_SERVICE_NAME_SECTION, config_service_value_cb, NULL);
// Always associate the default port with Netdata
ebpf_network_viewer_dim_name_t *names = network_viewer_opt.names;
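
Instead of walking `struct section`/`struct config_option` directly, the section is now traversed through a callback. A hedged sketch of that pattern, reusing the callback signature shown above; `handle_option()` and the section name string are hypothetical, and the section argument is assumed to be the section name:

```c
#include <stdio.h>
#include "libnetdata/libnetdata.h"   // assumed: appconfig_foreach_value_in_section(), __maybe_unused

// Hypothetical consumer for one name/value pair from a config section.
static void handle_option(const char *name, const char *value) {
    fprintf(stderr, "option '%s' = '%s'\n", name, value);
}

// Callback invoked once per option; returning true keeps the walk going,
// matching what the socket callback above does.
static bool example_value_cb(void *data __maybe_unused, const char *name, const char *value) {
    handle_option(name, value);
    return true;
}

static void example_parse_section(struct config *cfg) {
    // Walks every option in the section without touching appconfig internals.
    appconfig_foreach_value_in_section(cfg, "example-section", example_value_cb, NULL);
}
```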
diff --git a/src/collectors/ebpf.plugin/ebpf_socket.h b/src/collectors/ebpf.plugin/ebpf_socket.h
index e01126035..a236985eb 100644
--- a/src/collectors/ebpf.plugin/ebpf_socket.h
+++ b/src/collectors/ebpf.plugin/ebpf_socket.h
@@ -339,8 +339,8 @@ extern ebpf_network_viewer_port_list_t *listen_ports;
void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *values);
void ebpf_fill_ip_list_unsafe(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table);
void ebpf_parse_service_name_section(struct config *cfg);
-void ebpf_parse_ips_unsafe(char *ptr);
-void ebpf_parse_ports(char *ptr);
+void ebpf_parse_ips_unsafe(const char *ptr);
+void ebpf_parse_ports(const char *ptr);
void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em);
void ebpf_socket_fill_publish_apps(ebpf_socket_publish_apps_t *curr, netdata_socket_t *ns);
diff --git a/src/collectors/ebpf.plugin/ebpf_softirq.c b/src/collectors/ebpf.plugin/ebpf_softirq.c
index 21bd83a3e..19c495eea 100644
--- a/src/collectors/ebpf.plugin/ebpf_softirq.c
+++ b/src/collectors/ebpf.plugin/ebpf_softirq.c
@@ -3,11 +3,7 @@
#include "ebpf.h"
#include "ebpf_softirq.h"
-struct config softirq_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config softirq_config = APPCONFIG_INITIALIZER;
#define SOFTIRQ_MAP_LATENCY 0
static ebpf_local_maps_t softirq_maps[] = {
@@ -213,7 +209,7 @@ static void softirq_collector(ebpf_module_t *em)
// loop and read from published data until ebpf plugin is closed.
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
int update_every = em->update_every;
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
@@ -221,7 +217,7 @@ static void softirq_collector(ebpf_module_t *em)
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_swap.c b/src/collectors/ebpf.plugin/ebpf_swap.c
index 933353178..3be56cfa4 100644
--- a/src/collectors/ebpf.plugin/ebpf_swap.c
+++ b/src/collectors/ebpf.plugin/ebpf_swap.c
@@ -12,11 +12,7 @@ static netdata_idx_t *swap_values = NULL;
netdata_ebpf_swap_t *swap_vector = NULL;
-struct config swap_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config swap_config = APPCONFIG_INITIALIZER;
static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0,
@@ -543,9 +539,8 @@ void ebpf_swap_resume_apps_data() {
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
- * @param max_period limit of iterations without updates before remove data from hash table
*/
-static void ebpf_read_swap_apps_table(int maps_per_core, uint32_t max_period)
+static void ebpf_read_swap_apps_table(int maps_per_core)
{
netdata_ebpf_swap_t *cv = swap_vector;
int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
@@ -597,9 +592,6 @@ end_swap_loop:
*/
void *ebpf_read_swap_thread(void *ptr)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
ebpf_module_t *em = (ebpf_module_t *)ptr;
int maps_per_core = em->maps_per_core;
@@ -612,17 +604,17 @@ void *ebpf_read_swap_thread(void *ptr)
uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
- usec_t period = update_every * USEC_PER_SEC;
- uint32_t max_period = EBPF_CLEANUP_FACTOR;
pids_fd[EBPF_PIDS_SWAP_IDX] = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
+ heartbeat_t hb;
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, period);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
pthread_mutex_lock(&collect_data_mutex);
- ebpf_read_swap_apps_table(maps_per_core, max_period);
+ ebpf_read_swap_apps_table(maps_per_core);
ebpf_swap_resume_apps_data();
pthread_mutex_unlock(&collect_data_mutex);
@@ -930,16 +922,17 @@ static void swap_collector(ebpf_module_t *em)
{
int cgroup = em->cgroup_charts;
int update_every = em->update_every;
- heartbeat_t hb;
- heartbeat_init(&hb);
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
+
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ (void)heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_sync.c b/src/collectors/ebpf.plugin/ebpf_sync.c
index 2be9192c5..094de7019 100644
--- a/src/collectors/ebpf.plugin/ebpf_sync.c
+++ b/src/collectors/ebpf.plugin/ebpf_sync.c
@@ -100,11 +100,7 @@ ebpf_local_maps_t sync_file_range_maps[] = {{.name = "tbl_syncfr", .internal_inp
#endif
}};
-struct config sync_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config sync_config = APPCONFIG_INITIALIZER;
netdata_ebpf_targets_t sync_targets[] = { {.name = NETDATA_SYSCALLS_SYNC, .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NETDATA_SYSCALLS_SYNCFS, .mode = EBPF_LOAD_TRAMPOLINE},
@@ -558,15 +554,15 @@ static void sync_send_data()
*/
static void sync_collector(ebpf_module_t *em)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/ebpf_vfs.c b/src/collectors/ebpf.plugin/ebpf_vfs.c
index cf1f50e99..c0c1bee38 100644
--- a/src/collectors/ebpf.plugin/ebpf_vfs.c
+++ b/src/collectors/ebpf.plugin/ebpf_vfs.c
@@ -52,11 +52,7 @@ struct netdata_static_thread ebpf_read_vfs = {
.start_routine = NULL
};
-struct config vfs_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
+struct config vfs_config = APPCONFIG_INITIALIZER;
netdata_ebpf_targets_t vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "vfs_writev", .mode = EBPF_LOAD_TRAMPOLINE},
@@ -2064,9 +2060,6 @@ void ebpf_vfs_resume_apps_data() {
*/
void *ebpf_read_vfs_thread(void *ptr)
{
- heartbeat_t hb;
- heartbeat_init(&hb);
-
ebpf_module_t *em = (ebpf_module_t *)ptr;
int maps_per_core = em->maps_per_core;
@@ -2079,11 +2072,12 @@ void *ebpf_read_vfs_thread(void *ptr)
uint32_t lifetime = em->lifetime;
uint32_t running_time = 0;
- usec_t period = update_every * USEC_PER_SEC;
uint32_t max_period = EBPF_CLEANUP_FACTOR;
pids_fd[EBPF_PIDS_VFS_IDX] = vfs_maps[NETDATA_VFS_PID].map_fd;
+ heartbeat_t hb;
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, period);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
@@ -2116,8 +2110,6 @@ void *ebpf_read_vfs_thread(void *ptr)
static void vfs_collector(ebpf_module_t *em)
{
int cgroups = em->cgroup_charts;
- heartbeat_t hb;
- heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
int maps_per_core = em->maps_per_core;
@@ -2125,8 +2117,10 @@ static void vfs_collector(ebpf_module_t *em)
uint32_t lifetime = em->lifetime;
netdata_idx_t *stats = em->hash_table_stats;
memset(stats, 0, sizeof(em->hash_table_stats));
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
while (!ebpf_plugin_stop() && running_time < lifetime) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (ebpf_plugin_stop() || ++counter != update_every)
continue;
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md b/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
index 352bc0721..4bfb238ba 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
@@ -145,8 +145,8 @@ Now follow steps:
The configuration file name for this integration is `ebpf.d/cachestat.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md b/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
index 5ca7a6a68..9e6f8ef32 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
@@ -143,8 +143,8 @@ Now follow steps:
The configuration file name for this integration is `ebpf.d/dcstat.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_disk.md b/src/collectors/ebpf.plugin/integrations/ebpf_disk.md
index 4fc3dc700..7dccc51c4 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_disk.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_disk.md
@@ -109,8 +109,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
The configuration file name for this integration is `ebpf.d/disk.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md b/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
index 2f917d183..f9c9aa1a6 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
@@ -143,8 +143,8 @@ Now follow steps:
The configuration file name for this integration is `ebpf.d/fd.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md b/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
index ea55a6c04..b4b8e490c 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
@@ -130,8 +130,8 @@ Now follow steps:
The configuration file name for this integration is `ebpf.d/filesystem.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md b/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
index d5f79353f..8d77f9ee3 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
@@ -109,8 +109,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
The configuration file name for this integration is `ebpf.d/hardirq.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md b/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
index 369e8958f..663557eca 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
@@ -104,8 +104,8 @@ Now follow steps:
The configuration file name for this integration is `ebpf.d/mdflush.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_mount.md b/src/collectors/ebpf.plugin/integrations/ebpf_mount.md
index 5e6738e2c..64dcaeacd 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_mount.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_mount.md
@@ -110,8 +110,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
The configuration file name for this integration is `ebpf.d/mount.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md b/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
index d9e14f4fb..bc40c883b 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
@@ -126,8 +126,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
The configuration file name for this integration is `ebpf.d/oomkill.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_processes.md b/src/collectors/ebpf.plugin/integrations/ebpf_processes.md
index 8ff091da0..f3bc209d0 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_processes.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_processes.md
@@ -153,8 +153,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
The configuration file name for this integration is `ebpf.d/process.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_shm.md b/src/collectors/ebpf.plugin/integrations/ebpf_shm.md
index c65d3a85e..2e037ea30 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_shm.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_shm.md
@@ -147,8 +147,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
The configuration file name for this integration is `ebpf.d/shm.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_socket.md b/src/collectors/ebpf.plugin/integrations/ebpf_socket.md
index 917dcaba6..441e72963 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_socket.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_socket.md
@@ -162,8 +162,8 @@ Now follow steps:
The configuration file name for this integration is `ebpf.d/network.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md b/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md
index 1571dd4b5..e8214cff6 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md
@@ -109,8 +109,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
The configuration file name for this integration is `ebpf.d/softirq.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_swap.md b/src/collectors/ebpf.plugin/integrations/ebpf_swap.md
index 4358ac71b..0fe6cd6ca 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_swap.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_swap.md
@@ -136,8 +136,8 @@ Now follow steps:
The configuration file name for this integration is `ebpf.d/swap.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_sync.md b/src/collectors/ebpf.plugin/integrations/ebpf_sync.md
index 08d69fada..237f340ed 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_sync.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_sync.md
@@ -117,8 +117,8 @@ This thread needs to attach a tracepoint to monitor when a process schedule an e
The configuration file name for this integration is `ebpf.d/sync.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md b/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md
index 3adb00e9b..bf45d3858 100644
--- a/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md
@@ -178,8 +178,8 @@ Now follow steps:
The configuration file name for this integration is `ebpf.d/vfs.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/README.md b/src/collectors/freebsd.plugin/README.md
index 9fae20aec..e8e7fd42e 100644
--- a/src/collectors/freebsd.plugin/README.md
+++ b/src/collectors/freebsd.plugin/README.md
@@ -1,16 +1,5 @@
-<!--
-title: "FreeBSD system metrics (freebsd.plugin)"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/freebsd.plugin/README.md"
-sidebar_label: "FreeBSD system metrics (freebsd.plugin)"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/System metrics"
--->
-
# FreeBSD system metrics (freebsd.plugin)
Collects resource usage and performance data on FreeBSD systems.
By default, Netdata enables monitoring metrics for disks, memory, and network only when they are not zero; if they stay constantly at zero, they are ignored. Metrics that start reporting values after Netdata has started are detected automatically and their charts are added to the dashboard (a dashboard refresh is needed for them to appear). Use `yes` instead of `auto` in the plugin configuration sections to enable these charts permanently, as in the example below. You can also set the `enable zero metrics` option to `yes` in the `[global]` section, which enables charts with zero metrics for all internal Netdata plugins.
-
-
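
For illustration only, a sketch of where those options would sit in `netdata.conf`: `enable zero metrics` under `[global]` is the option named in the paragraph above, while the per-plugin section and chart option names below are hypothetical placeholders.

```
# netdata.conf (sketch; section and option names below [global] are illustrative)
[global]
    enable zero metrics = yes          # enables zero-valued charts for all internal plugins

[plugin:freebsd:example.chart]         # hypothetical per-chart section
    example chart = yes                # hypothetical option; 'yes' instead of 'auto' keeps the chart permanently
```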
diff --git a/src/collectors/freebsd.plugin/freebsd_devstat.c b/src/collectors/freebsd.plugin/freebsd_devstat.c
index e0e2e97b8..c3ee43961 100644
--- a/src/collectors/freebsd.plugin/freebsd_devstat.c
+++ b/src/collectors/freebsd.plugin/freebsd_devstat.c
@@ -393,8 +393,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(dm->st_ops, RRDSET_FLAG_DETAIL);
-
dm->rd_ops_in = rrddim_add(dm->st_ops, "reads", NULL, 1, 1,
RRD_ALGORITHM_INCREMENTAL);
dm->rd_ops_out = rrddim_add(dm->st_ops, "writes", NULL, -1, 1,
@@ -428,8 +426,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(dm->st_qops, RRDSET_FLAG_DETAIL);
-
dm->rd_qops = rrddim_add(dm->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
@@ -453,8 +449,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
RRDSET_TYPE_AREA
);
- rrdset_flag_set(dm->st_util, RRDSET_FLAG_DETAIL);
-
dm->rd_util = rrddim_add(dm->st_util, "utilization", NULL, 1, 10,
RRD_ALGORITHM_INCREMENTAL);
}
@@ -479,8 +473,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(dm->st_iotime, RRDSET_FLAG_DETAIL);
-
dm->rd_iotime_in = rrddim_add(dm->st_iotime, "reads", NULL, 1, 1,
RRD_ALGORITHM_INCREMENTAL);
dm->rd_iotime_out = rrddim_add(dm->st_iotime, "writes", NULL, -1, 1,
@@ -518,8 +510,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(dm->st_await, RRDSET_FLAG_DETAIL);
-
dm->rd_await_in = rrddim_add(dm->st_await, "reads", NULL, 1, 1,
RRD_ALGORITHM_ABSOLUTE);
dm->rd_await_out = rrddim_add(dm->st_await, "writes", NULL, -1, 1,
@@ -577,8 +567,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
RRDSET_TYPE_AREA
);
- rrdset_flag_set(dm->st_avagsz, RRDSET_FLAG_DETAIL);
-
dm->rd_avagsz_in = rrddim_add(dm->st_avagsz, "reads", NULL, 1, KILO_FACTOR,
RRD_ALGORITHM_ABSOLUTE);
dm->rd_avagsz_out = rrddim_add(dm->st_avagsz, "writes", NULL, -1, KILO_FACTOR,
@@ -627,8 +615,6 @@ int do_kern_devstat(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(dm->st_svctm, RRDSET_FLAG_DETAIL);
-
dm->rd_svctm = rrddim_add(dm->st_svctm, "svctm", NULL, 1, 1,
RRD_ALGORITHM_ABSOLUTE);
}
diff --git a/src/collectors/freebsd.plugin/freebsd_getifaddrs.c b/src/collectors/freebsd.plugin/freebsd_getifaddrs.c
index 153ab8b84..a33db85e7 100644
--- a/src/collectors/freebsd.plugin/freebsd_getifaddrs.c
+++ b/src/collectors/freebsd.plugin/freebsd_getifaddrs.c
@@ -297,8 +297,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_packets_in = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_packets_out = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_packets_m_in = rrddim_add(st, "multicast_received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -464,8 +462,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(ifm->st_packets, RRDSET_FLAG_DETAIL);
-
ifm->rd_packets_in = rrddim_add(ifm->st_packets, "received", NULL, 1, 1,
RRD_ALGORITHM_INCREMENTAL);
ifm->rd_packets_out = rrddim_add(ifm->st_packets, "sent", NULL, -1, 1,
@@ -499,8 +495,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(ifm->st_errors, RRDSET_FLAG_DETAIL);
-
ifm->rd_errors_in = rrddim_add(ifm->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
ifm->rd_errors_out = rrddim_add(ifm->st_errors, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
@@ -526,8 +520,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(ifm->st_drops, RRDSET_FLAG_DETAIL);
-
ifm->rd_drops_in = rrddim_add(ifm->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
#if __FreeBSD__ >= 11
ifm->rd_drops_out = rrddim_add(ifm->st_drops, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -557,8 +549,6 @@ int do_getifaddrs(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(ifm->st_events, RRDSET_FLAG_DETAIL);
-
ifm->rd_events_coll = rrddim_add(ifm->st_events, "collisions", NULL, -1, 1,
RRD_ALGORITHM_INCREMENTAL);
}
diff --git a/src/collectors/freebsd.plugin/freebsd_ipfw.c b/src/collectors/freebsd.plugin/freebsd_ipfw.c
index dcb771ce9..b94bf15c2 100644
--- a/src/collectors/freebsd.plugin/freebsd_ipfw.c
+++ b/src/collectors/freebsd.plugin/freebsd_ipfw.c
@@ -168,7 +168,6 @@ int do_ipfw(int update_every, usec_t dt) {
update_every,
RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st_mem, RRDSET_FLAG_DETAIL);
rd_dyn_mem = rrddim_add(st_mem, "dynamic", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
rd_stat_mem = rrddim_add(st_mem, "static", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
diff --git a/src/collectors/freebsd.plugin/freebsd_sysctl.c b/src/collectors/freebsd.plugin/freebsd_sysctl.c
index 0fa710275..525170e47 100644
--- a/src/collectors/freebsd.plugin/freebsd_sysctl.c
+++ b/src/collectors/freebsd.plugin/freebsd_sysctl.c
@@ -271,7 +271,6 @@ int do_vm_vmtotal(int update_every, usec_t dt) {
update_every,
RRDSET_TYPE_AREA
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
}
@@ -798,8 +797,6 @@ int do_vm_stats_sys_v_forks(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd = rrddim_add(st, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
@@ -871,8 +868,6 @@ int do_vm_swap_info(int update_every, usec_t dt) {
RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_free = rrddim_add(st, "free", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
rd_used = rrddim_add(st, "used", NULL, system_pagesize, MEGA_FACTOR, RRD_ALGORITHM_ABSOLUTE);
}
@@ -1081,8 +1076,6 @@ int do_vm_stats_sys_v_pgfaults(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_memory = rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_io_requiring = rrddim_add(st, "io_requiring", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_cow = rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1756,8 +1749,6 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_in_errs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_in_csum_errs = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_retrans_segs = rrddim_add(st, "RetransSegs", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1795,8 +1786,6 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_estab_resets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_active_opens = rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_passive_opens = rrddim_add(st, "PassiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1971,8 +1960,6 @@ int do_net_inet_tcp_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_rcvce = rrddim_add(st, "InCEPkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
#if __FreeBSD_version < 1400074
rd_ect0 = rrddim_add(st, "ECT0Pkts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2082,8 +2069,6 @@ int do_net_inet_udp_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_in_errors = rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_no_ports = rrddim_add(st, "NoPorts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_recv_buf_errors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2332,8 +2317,6 @@ int do_net_inet_ip_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_fails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_created = rrddim_add(st, "FragCreates", "created", 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2365,8 +2348,6 @@ int do_net_inet_ip_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_all = rrddim_add(st, "ReasmReqds", "all", 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2400,8 +2381,6 @@ int do_net_inet_ip_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2519,8 +2498,6 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_all = rrddim_add(st, "all", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2554,8 +2531,6 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_ok = rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_failed = rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_timeout = rrddim_add(st, "timeout", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2593,8 +2568,6 @@ int do_net_inet6_ip6_stats(int update_every, usec_t dt) {
RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_in_discards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_out_discards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_in_hdr_errors = rrddim_add(st, "InHdrErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md b/src/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md
index 322b3fd5d..13415c7a6 100644
--- a/src/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md
+++ b/src/collectors/freebsd.plugin/integrations/dev.cpu.0.freq.md
@@ -86,8 +86,8 @@ The configuration file name for this integration is `Config options`.
Configuration for this specific integration is located in the `[plugin:freebsd]` section within that file.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md b/src/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md
index 38bbba341..326928bb7 100644
--- a/src/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md
+++ b/src/collectors/freebsd.plugin/integrations/dev.cpu.temperature.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/devstat.md b/src/collectors/freebsd.plugin/integrations/devstat.md
index 1cc2795b4..56e69d382 100644
--- a/src/collectors/freebsd.plugin/integrations/devstat.md
+++ b/src/collectors/freebsd.plugin/integrations/devstat.md
@@ -119,8 +119,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/getifaddrs.md b/src/collectors/freebsd.plugin/integrations/getifaddrs.md
index ce9d9e337..41845cf62 100644
--- a/src/collectors/freebsd.plugin/integrations/getifaddrs.md
+++ b/src/collectors/freebsd.plugin/integrations/getifaddrs.md
@@ -125,8 +125,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/getmntinfo.md b/src/collectors/freebsd.plugin/integrations/getmntinfo.md
index 186487d11..1779cfd1b 100644
--- a/src/collectors/freebsd.plugin/integrations/getmntinfo.md
+++ b/src/collectors/freebsd.plugin/integrations/getmntinfo.md
@@ -102,8 +102,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/hw.intrcnt.md b/src/collectors/freebsd.plugin/integrations/hw.intrcnt.md
index 713d388f9..b18e9afd0 100644
--- a/src/collectors/freebsd.plugin/integrations/hw.intrcnt.md
+++ b/src/collectors/freebsd.plugin/integrations/hw.intrcnt.md
@@ -96,8 +96,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/ipfw.md b/src/collectors/freebsd.plugin/integrations/ipfw.md
index 33aa4a249..59b93f2a1 100644
--- a/src/collectors/freebsd.plugin/integrations/ipfw.md
+++ b/src/collectors/freebsd.plugin/integrations/ipfw.md
@@ -99,8 +99,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/kern.cp_time.md b/src/collectors/freebsd.plugin/integrations/kern.cp_time.md
index 158e7fc1e..4bf39aa1b 100644
--- a/src/collectors/freebsd.plugin/integrations/kern.cp_time.md
+++ b/src/collectors/freebsd.plugin/integrations/kern.cp_time.md
@@ -114,8 +114,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/kern.ipc.msq.md b/src/collectors/freebsd.plugin/integrations/kern.ipc.msq.md
index a0c6504f2..987d73029 100644
--- a/src/collectors/freebsd.plugin/integrations/kern.ipc.msq.md
+++ b/src/collectors/freebsd.plugin/integrations/kern.ipc.msq.md
@@ -97,8 +97,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/kern.ipc.sem.md b/src/collectors/freebsd.plugin/integrations/kern.ipc.sem.md
index 71f5605e8..73f603690 100644
--- a/src/collectors/freebsd.plugin/integrations/kern.ipc.sem.md
+++ b/src/collectors/freebsd.plugin/integrations/kern.ipc.sem.md
@@ -102,8 +102,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/kern.ipc.shm.md b/src/collectors/freebsd.plugin/integrations/kern.ipc.shm.md
index 278445e64..e9691013d 100644
--- a/src/collectors/freebsd.plugin/integrations/kern.ipc.shm.md
+++ b/src/collectors/freebsd.plugin/integrations/kern.ipc.shm.md
@@ -96,8 +96,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md
index 42ceb19ca..80b18fabd 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.icmp.stats.md
@@ -97,8 +97,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md
index 8c5c4355d..538a6054a 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.ip.stats.md
@@ -98,8 +98,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md b/src/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md
index 41bacfedd..8f56f0150 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.tcp.states.md
@@ -100,8 +100,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md
index 259846ea1..b2beb3681 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.tcp.stats.md
@@ -110,8 +110,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md
index ec672a686..2dd3fc06f 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet.udp.stats.md
@@ -102,8 +102,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md
index fe23457f6..e0fe3bd90 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet6.icmp6.stats.md
@@ -101,8 +101,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md b/src/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md
index ac4015787..ecfac70e9 100644
--- a/src/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md
+++ b/src/collectors/freebsd.plugin/integrations/net.inet6.ip6.stats.md
@@ -98,8 +98,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/net.isr.md b/src/collectors/freebsd.plugin/integrations/net.isr.md
index f9819be80..962ffc3ee 100644
--- a/src/collectors/freebsd.plugin/integrations/net.isr.md
+++ b/src/collectors/freebsd.plugin/integrations/net.isr.md
@@ -114,8 +114,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/system.ram.md b/src/collectors/freebsd.plugin/integrations/system.ram.md
index b11b39390..791bcf3f7 100644
--- a/src/collectors/freebsd.plugin/integrations/system.ram.md
+++ b/src/collectors/freebsd.plugin/integrations/system.ram.md
@@ -104,8 +104,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/uptime.md b/src/collectors/freebsd.plugin/integrations/uptime.md
index 58ad767ec..cc2e7091e 100644
--- a/src/collectors/freebsd.plugin/integrations/uptime.md
+++ b/src/collectors/freebsd.plugin/integrations/uptime.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.loadavg.md b/src/collectors/freebsd.plugin/integrations/vm.loadavg.md
index f6ae59e7e..4c97eda2e 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.loadavg.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.loadavg.md
@@ -103,8 +103,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md
index 7f1d88ed7..2d28bbf75 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_intr.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md
index baa102d2c..670ad88a0 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_soft.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md
index 569f50ed6..7cd30fea1 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.sys.v_swtch.md
@@ -96,8 +96,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md b/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md
index a99e24df9..4fcf3433e 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_pgfaults.md
@@ -95,8 +95,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md b/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md
index fd595e2cc..8613fe30e 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.stats.vm.v_swappgs.md
@@ -100,8 +100,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.swap_info.md b/src/collectors/freebsd.plugin/integrations/vm.swap_info.md
index a92689a15..978d067c3 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.swap_info.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.swap_info.md
@@ -100,8 +100,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/vm.vmtotal.md b/src/collectors/freebsd.plugin/integrations/vm.vmtotal.md
index 3b3955de4..69c98a7f2 100644
--- a/src/collectors/freebsd.plugin/integrations/vm.vmtotal.md
+++ b/src/collectors/freebsd.plugin/integrations/vm.vmtotal.md
@@ -102,8 +102,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/integrations/zfs.md b/src/collectors/freebsd.plugin/integrations/zfs.md
index d34a5c5ca..4f0538450 100644
--- a/src/collectors/freebsd.plugin/integrations/zfs.md
+++ b/src/collectors/freebsd.plugin/integrations/zfs.md
@@ -127,8 +127,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/freebsd.plugin/plugin_freebsd.c b/src/collectors/freebsd.plugin/plugin_freebsd.c
index 10f7e66b9..225534373 100644
--- a/src/collectors/freebsd.plugin/plugin_freebsd.c
+++ b/src/collectors/freebsd.plugin/plugin_freebsd.c
@@ -105,14 +105,13 @@ void *freebsd_main(void *ptr)
worker_register_job_name(i, freebsd_modules[i].dim);
}
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC);
while(service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t hb_dt = heartbeat_next(&hb);
if (!service_running(SERVICE_COLLECTORS))
break;
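
The hunk above (and the freeipmi changes below) move the collection step from `heartbeat_next()` into `heartbeat_init()`. A minimal sketch of the resulting collector loop, assuming only the signatures visible in these hunks (struct internals and the service/worker helpers are taken as given):

```c
// Sketch only: the step is bound once at init time and can no longer be
// changed per call to heartbeat_next().
heartbeat_t hb;
heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC); // step in microseconds

while (service_running(SERVICE_COLLECTORS)) {
    worker_is_idle();
    usec_t hb_dt = heartbeat_next(&hb);   // waits for the next tick, returns elapsed usec
    if (!service_running(SERVICE_COLLECTORS))
        break;

    // ... run the per-module collection, using hb_dt as the elapsed time ...
}
```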
diff --git a/src/collectors/freeipmi.plugin/freeipmi_plugin.c b/src/collectors/freeipmi.plugin/freeipmi_plugin.c
index 38fb1d19b..a0eb0783d 100644
--- a/src/collectors/freeipmi.plugin/freeipmi_plugin.c
+++ b/src/collectors/freeipmi.plugin/freeipmi_plugin.c
@@ -1240,9 +1240,9 @@ void *netdata_ipmi_collection_thread(void *ptr) {
usec_t step = t->freq_s * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, step);
while(++iteration) {
- heartbeat_next(&hb, step);
+ heartbeat_next(&hb);
if(t->debug)
fprintf(stderr, "%s: calling netdata_ipmi_collect_data() for %s\n",
@@ -1488,7 +1488,7 @@ static void freeimi_function_sensors(const char *transaction, char *function __m
char function_copy[strlen(function) + 1];
memcpy(function_copy, function, sizeof(function_copy));
char *words[1024];
- size_t num_words = quoted_strings_splitter_pluginsd(function_copy, words, 1024);
+ size_t num_words = quoted_strings_splitter_whitespace(function_copy, words, 1024);
for(size_t i = 1; i < num_words ;i++) {
char *param = get_word(words, num_words, i);
if(strcmp(param, "info") == 0) {
@@ -1629,7 +1629,10 @@ close_and_send:
buffer_json_member_add_time_t(wb, "expires", now_s + update_every);
buffer_json_finalize(wb);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_s + update_every, wb);
+ wb->response_code = HTTP_RESP_OK;
+ wb->content_type = CT_APPLICATION_JSON;
+ wb->expires = now_s + update_every;
+ pluginsd_function_result_to_stdout(transaction, wb);
buffer_free(wb);
}
@@ -1637,14 +1640,13 @@ close_and_send:
// ----------------------------------------------------------------------------
// main, command line arguments parsing
-static NORETURN void plugin_exit(int code) {
+static void plugin_exit(int code) {
fflush(stdout);
function_plugin_should_exit = true;
exit(code);
}
int main (int argc, char **argv) {
- clocks_init();
nd_log_initialize_for_external_plugins("freeipmi.plugin");
netdata_threads_init_for_external_plugins(0); // set the default threads stack size here
@@ -1997,15 +1999,13 @@ int main (int argc, char **argv) {
time_t started_t = now_monotonic_sec();
size_t iteration = 0;
- usec_t step = 100 * USEC_PER_MS;
bool global_chart_created = false;
bool tty = isatty(fileno(stdout)) == 1;
heartbeat_t hb;
- heartbeat_init(&hb);
-
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
for(iteration = 0; 1 ; iteration++) {
- usec_t dt = heartbeat_next(&hb, step);
+ usec_t dt = heartbeat_next(&hb);
if (!tty) {
netdata_mutex_lock(&stdout_mutex);
@@ -2024,7 +2024,6 @@ int main (int argc, char **argv) {
switch(state.sensors.status) {
case ICS_RUNNING:
- step = update_every * USEC_PER_SEC;
if(state.sensors.last_iteration_ut < now_monotonic_usec() - IPMI_RESTART_IF_SENSORS_DONT_ITERATE_EVERY_SECONDS * USEC_PER_SEC) {
collector_error("%s(): sensors have not be collected for %zu seconds. Exiting to restart.",
__FUNCTION__, (size_t)((now_monotonic_usec() - state.sensors.last_iteration_ut) / USEC_PER_SEC));
@@ -2041,11 +2040,13 @@ int main (int argc, char **argv) {
collector_error("%s(): sensors failed to initialize. Calling DISABLE.", __FUNCTION__);
fprintf(stdout, "DISABLE\n");
plugin_exit(0);
+ break;
case ICS_FAILED:
collector_error("%s(): sensors fails repeatedly to collect metrics. Exiting to restart.", __FUNCTION__);
fprintf(stdout, "EXIT\n");
plugin_exit(0);
+ break;
}
if(netdata_do_sel) {
diff --git a/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md b/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md
index 9bd75f975..284db5199 100644
--- a/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md
+++ b/src/collectors/freeipmi.plugin/integrations/intelligent_platform_management_interface_ipmi.md
@@ -143,8 +143,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md b/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md
index d5baa094a..0cb3a6e0b 100644
--- a/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md
+++ b/src/collectors/idlejitter.plugin/integrations/idle_os_jitter.md
@@ -96,8 +96,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -110,7 +110,7 @@ This integration only supports a single configuration option, and most users wil
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| loop time in ms | Specifies the target time for the data collection thread to sleep, measured in miliseconds. | 20 | no |
+| loop time | Specifies the target time for the data collection thread to sleep, measured in milliseconds. | 20ms | no |
#### Examples
There are no configuration examples.
diff --git a/src/collectors/idlejitter.plugin/metadata.yaml b/src/collectors/idlejitter.plugin/metadata.yaml
index 0ad946994..7c49a6ec7 100644
--- a/src/collectors/idlejitter.plugin/metadata.yaml
+++ b/src/collectors/idlejitter.plugin/metadata.yaml
@@ -55,10 +55,10 @@ modules:
title: ''
enabled: false
list:
- - name: loop time in ms
+ - name: loop time
description: >
Specifies the target time for the data collection thread to sleep, measured in miliseconds.
- default_value: 20
+ default_value: 20ms
required: false
examples:
folding:
diff --git a/src/collectors/idlejitter.plugin/plugin_idlejitter.c b/src/collectors/idlejitter.plugin/plugin_idlejitter.c
index 99645b1d2..2a212a669 100644
--- a/src/collectors/idlejitter.plugin/plugin_idlejitter.c
+++ b/src/collectors/idlejitter.plugin/plugin_idlejitter.c
@@ -22,9 +22,9 @@ void *cpuidlejitter_main(void *ptr) {
worker_register("IDLEJITTER");
worker_register_job_name(0, "measurements");
- usec_t sleep_ut = config_get_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS;
+ usec_t sleep_ut = config_get_duration_ms("plugin:idlejitter", "loop time", CPU_IDLEJITTER_SLEEP_TIME_MS) * USEC_PER_MS;
if(sleep_ut <= 0) {
- config_set_number("plugin:idlejitter", "loop time in ms", CPU_IDLEJITTER_SLEEP_TIME_MS);
+ config_set_duration_ms("plugin:idlejitter", "loop time", CPU_IDLEJITTER_SLEEP_TIME_MS);
sleep_ut = CPU_IDLEJITTER_SLEEP_TIME_MS * USEC_PER_MS;
}
diff --git a/src/collectors/ioping.plugin/integrations/ioping.md b/src/collectors/ioping.plugin/integrations/ioping.md
index 24630ae39..c3a697a1a 100644
--- a/src/collectors/ioping.plugin/integrations/ioping.md
+++ b/src/collectors/ioping.plugin/integrations/ioping.md
@@ -94,8 +94,8 @@ You can install the command by passing the argument `install` to the plugin (`/u
The configuration file name for this integration is `ioping.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/log2journal/README.md b/src/collectors/log2journal/README.md
index 9807b33ee..d9764d5d5 100644
--- a/src/collectors/log2journal/README.md
+++ b/src/collectors/log2journal/README.md
@@ -1,4 +1,3 @@
-
# log2journal
`log2journal` and `systemd-cat-native` can be used to convert a structured log file, such as the ones generated by web servers, into `systemd-journal` entries.
@@ -11,7 +10,6 @@ The result is like this: nginx logs into systemd-journal:
![image](https://github.com/netdata/netdata/assets/2662304/16b471ff-c5a1-4fcc-bcd5-83551e089f6c)
-
The overall process looks like this:
```bash
@@ -23,7 +21,8 @@ tail -F /var/log/nginx/*.log |\ # outputs log lines
These are the steps:
1. `tail -F /var/log/nginx/*.log`<br/>this command will tail all `*.log` files in `/var/log/nginx/`. We use `-F` instead of `-f` to ensure that files will still be tailed after log rotation.
-2. `log2joural` is a Netdata program. It reads log entries and extracts fields, according to the PCRE2 pattern it accepts. It can also apply some basic operations on the fields, like injecting new fields or duplicating existing ones or rewriting their values. The output of `log2journal` is in Systemd Journal Export Format, and it looks like this:
+2. `log2journal` is a Netdata program. It reads log entries and extracts fields, according to the PCRE2 pattern it accepts. It can also apply some basic operations on the fields, like injecting new fields or duplicating existing ones or rewriting their values. The output of `log2journal` is in Systemd Journal Export Format, and it looks like this:
+
```bash
KEY1=VALUE1 # << start of the first log line
KEY2=VALUE2
@@ -31,8 +30,8 @@ These are the steps:
KEY1=VALUE1 # << start of the second log line
KEY2=VALUE2
```
-3. `systemd-cat-native` is a Netdata program. I can send the logs to a local `systemd-journald` (journal namespaces supported), or to a remote `systemd-journal-remote`.
+3. `systemd-cat-native` is a Netdata program. It can send the logs to a local `systemd-journald` (journal namespaces supported), or to a remote `systemd-journal-remote`.
## Processing pipeline
@@ -44,19 +43,19 @@ The sequence of processing in Netdata's `log2journal` is designed to methodicall
2. **Extract Fields and Values**<br/>
Based on the input format (JSON, logfmt, or custom pattern), it extracts fields and their values from each log line. In the case of JSON and logfmt, it automatically extracts all fields. For custom patterns, it uses PCRE2 regular expressions, and fields are extracted based on sub-expressions defined in the pattern.
-3. **Transliteration**<br/>
+3. **Transliteration**<br/>
Extracted fields are transliterated to the limited character set accepted by systemd-journal: capitals A-Z, digits 0-9, underscores.
4. **Apply Optional Prefix**<br/>
If a prefix is specified, it is added to all keys. This happens before any other processing so that all subsequent matches and manipulations take the prefix into account.
-5. **Rename Fields**<br/>
+5. **Rename Fields**<br/>
Renames fields as specified in the configuration. This is used to change the names of the fields to match desired or required naming conventions.
6. **Inject New Fields**<br/>
New fields are injected into the log data. This can include constants or values derived from other fields, using variable substitution.
-7. **Rewrite Field Values**<br/>
+7. **Rewrite Field Values**<br/>
Applies rewriting rules to alter the values of the fields. This can involve complex transformations, including regular expressions and variable substitutions. The rewrite rules can also inject new fields into the data.
8. **Filter Fields**<br/>
@@ -81,7 +80,7 @@ We have an nginx server logging in this standard combined log format:
First, let's find the right pattern for `log2journal`. We ask ChatGPT:
-```
+```text
My nginx log uses this log format:
log_format access '$remote_addr - $remote_user [$time_local] '
@@ -122,11 +121,11 @@ ChatGPT replies with this:
Let's see what the above says:
1. `(?x)`: enable PCRE2 extended mode. In this mode spaces and newlines in the pattern are ignored. To match a space you have to use `\s`. This mode allows us to split the pattern is multiple lines and add comments to it.
-1. `^`: match the beginning of the line
-2. `(?<remote_addr[^ ]+)`: match anything up to the first space (`[^ ]+`), and name it `remote_addr`.
-3. `\s`: match a space
-4. `-`: match a hyphen
-5. and so on...
+2. `^`: match the beginning of the line
+3. `(?<remote_addr>[^ ]+)`: match anything up to the first space (`[^ ]+`), and name it `remote_addr`.
+4. `\s`: match a space
+5. `-`: match a hyphen
+6. and so on...
We edit `nginx.yaml` and add it, like this:
@@ -427,7 +426,6 @@ Rewrite rules are powerful. You can have named groups in them, like in the main
Now the message is ready to be sent to a systemd-journal. For this we use `systemd-cat-native`. This command can send such messages to a journal running on the localhost, a local journal namespace, or a `systemd-journal-remote` running on another server. By just appending `| systemd-cat-native` to the command, the message will be sent to the local journal.
-
```bash
# echo '1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] "GET /index.html HTTP/1.1" 200 4172 "-" "Go-http-client/1.1"' | log2journal -f nginx.yaml | systemd-cat-native
# no output
@@ -486,7 +484,7 @@ tail -F /var/log/nginx/access.log |\
Create the file `/etc/systemd/system/nginx-logs.service` (change `/path/to/nginx.yaml` to the right path):
-```
+```text
[Unit]
Description=NGINX Log to Systemd Journal
After=network.target
@@ -524,7 +522,6 @@ Netdata will automatically pick the new namespace and present it at the list of
You can also instruct `systemd-cat-native` to log to a remote system, sending the logs to a `systemd-journal-remote` instance running on another server. Check [the manual of systemd-cat-native](/src/libnetdata/log/systemd-cat-native.md).
-
## Performance
`log2journal` and `systemd-cat-native` have been designed to process hundreds of thousands of log lines per second. They both utilize high performance indexing hashtables to speed up lookups, and queues that dynamically adapt to the number of log lines offered, offering a smooth and fast experience under all conditions.
@@ -537,15 +534,15 @@ The key characteristic that can influence the performance of a logs processing p
Especially the pattern `.*` seems to have the biggest impact on CPU consumption, especially when multiple `.*` are on the same pattern.
-Usually we use `.*` to indicate that we need to match everything up to a character, e.g. `.* ` to match up to a space. By replacing it with `[^ ]+` (meaning: match at least a character up to a space), the regular expression engine can be a lot more efficient, reducing the overall CPU utilization significantly.
+Usually we use `.*` to indicate that we need to match everything up to a character, e.g. `.*` followed by a space to match up to a space. By replacing it with `[^ ]+` (meaning: match at least one character up to a space), the regular expression engine can be a lot more efficient, reducing the overall CPU utilization significantly.
### Performance of systemd journals
The ingestion pipeline of logs, from `tail` to `systemd-journald` or `systemd-journal-remote` is very efficient in all aspects. CPU utilization is better than any other system we tested and RAM usage is independent of the number of fields indexed, making systemd-journal one of the most efficient log management engines for ingesting high volumes of structured logs.
-High fields cardinality does not have a noticable impact on systemd-journal. The amount of fields indexed and the amount of unique values per field, have a linear and predictable result in the resource utilization of `systemd-journald` and `systemd-journal-remote`. This is unlike other logs management solutions, like Loki, that their RAM requirements grow exponentially as the cardinality increases, making it impractical for them to index the amount of information systemd journals can index.
+High field cardinality does not have a noticeable impact on systemd-journal. The number of fields indexed and the number of unique values per field have a linear and predictable effect on the resource utilization of `systemd-journald` and `systemd-journal-remote`. This is unlike other log management solutions, such as Loki, whose RAM requirements grow exponentially as cardinality increases, making it impractical for them to index the amount of information systemd journals can index.
-However, the number of fields added to journals influences the overall disk footprint. Less fields means more log entries per journal file, smaller overall disk footprint and faster queries.
+However, the number of fields added to journals influences the overall disk footprint. Fewer fields mean more log entries per journal file, a smaller overall disk footprint, and faster queries.
systemd-journal files are primarily designed for security and reliability. This comes at the cost of disk footprint. The internal structure of journal files is such that in case of corruption, minimum data loss will incur. To achieve such a unique characteristic, certain data within the files need to be aligned at predefined boundaries, so that in case there is a corruption, non-corrupted parts of the journal file can be recovered.
@@ -578,7 +575,7 @@ If on other hand your organization prefers to maintain the full logs and control
## `log2journal` options
-```
+```text
Netdata log2journal v1.43.0-341-gdac4df856
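
The README text above recommends `[^ ]+` over `.*` for fields like `remote_addr`. A self-contained PCRE2 sketch of such a named capture group, illustrative only (the field name and log line are examples, not part of `log2journal` itself):

```c
// Extract a named group from a log line with PCRE2, anchoring on [^ ]+
// instead of a backtracking-heavy .* pattern.
#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
#include <stdio.h>
#include <string.h>

int main(void) {
    const char *pattern = "^(?<remote_addr>[^ ]+) ";
    const char *line = "1.2.3.4 - - [19/Nov/2023:00:24:43 +0000] \"GET / HTTP/1.1\" 200 4172";

    int errcode; PCRE2_SIZE erroffset;
    pcre2_code *re = pcre2_compile((PCRE2_SPTR)pattern, PCRE2_ZERO_TERMINATED, 0,
                                   &errcode, &erroffset, NULL);
    if (!re) return 1;

    pcre2_match_data *md = pcre2_match_data_create_from_pattern(re, NULL);
    int rc = pcre2_match(re, (PCRE2_SPTR)line, strlen(line), 0, 0, md, NULL);
    if (rc > 1) {
        // group 1 offsets live at ovector[2]/ovector[3]
        PCRE2_SIZE *ov = pcre2_get_ovector_pointer(md);
        printf("remote_addr = %.*s\n", (int)(ov[3] - ov[2]), line + ov[2]);
    }

    pcre2_match_data_free(md);
    pcre2_code_free(re);
    return 0;
}
```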
diff --git a/src/collectors/log2journal/log2journal-hashed-key.h b/src/collectors/log2journal/log2journal-hashed-key.h
new file mode 100644
index 000000000..0618d9538
--- /dev/null
+++ b/src/collectors/log2journal/log2journal-hashed-key.h
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LOG2JOURNAL_HASHED_KEY_H
+#define NETDATA_LOG2JOURNAL_HASHED_KEY_H
+
+#include "log2journal.h"
+
+typedef enum __attribute__((__packed__)) {
+ HK_NONE = 0,
+
+ // permanent flags - they are set once to optimize various decisions and lookups
+
+ HK_HASHTABLE_ALLOCATED = (1 << 0), // this is the key object allocated in the hashtable
+ // objects that do not have this, have a pointer to a key in the hashtable
+ // objects that have this, value is allocated
+
+ HK_FILTERED = (1 << 1), // we checked once if this key is filtered
+ HK_FILTERED_INCLUDED = (1 << 2), // the result of the filtering was to include it in the output
+
+ HK_COLLISION_CHECKED = (1 << 3), // we checked once for collision check of this key
+
+ HK_RENAMES_CHECKED = (1 << 4), // we checked once if there are renames on this key
+ HK_HAS_RENAMES = (1 << 5), // and we found there is a rename rule related to it
+
+ // ephemeral flags - they are unset at the end of each log line
+
+ HK_VALUE_FROM_LOG = (1 << 14), // the value of this key has been read from the log (or from injection, duplication)
+ HK_VALUE_REWRITTEN = (1 << 15), // the value of this key has been rewritten due to one of our rewrite rules
+
+} HASHED_KEY_FLAGS;
+
+typedef struct hashed_key {
+ const char *key;
+ uint32_t len;
+ HASHED_KEY_FLAGS flags;
+ XXH64_hash_t hash;
+ union {
+ struct hashed_key *hashtable_ptr; // HK_HASHTABLE_ALLOCATED is not set
+ TXT_L2J value; // HK_HASHTABLE_ALLOCATED is set
+ };
+} HASHED_KEY;
+
+static inline void hashed_key_cleanup(HASHED_KEY *k) {
+ if(k->flags & HK_HASHTABLE_ALLOCATED)
+ txt_l2j_cleanup(&k->value);
+ else
+ k->hashtable_ptr = NULL;
+
+ freez((void *)k->key);
+ k->key = NULL;
+ k->len = 0;
+ k->hash = 0;
+ k->flags = HK_NONE;
+}
+
+static inline void hashed_key_set(HASHED_KEY *k, const char *name, int32_t len) {
+ hashed_key_cleanup(k);
+
+ if(len == -1) {
+ k->key = strdupz(name);
+ k->len = strlen(k->key);
+ }
+ else {
+ k->key = strndupz(name, len);
+ k->len = len;
+ }
+
+ k->hash = XXH3_64bits(k->key, k->len);
+ k->flags = HK_NONE;
+}
+
+static inline bool hashed_keys_match(HASHED_KEY *k1, HASHED_KEY *k2) {
+ return ((k1 == k2) || (k1->hash == k2->hash && strcmp(k1->key, k2->key) == 0));
+}
+
+static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2) {
+ return strcmp(k1->key, k2->key);
+}
+
+#endif //NETDATA_LOG2JOURNAL_HASHED_KEY_H
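
A brief usage sketch of the new `HASHED_KEY` helpers defined above; illustrative only, relying on zero-initialized keys and the `-1` length convention of `hashed_key_set()`:

```c
// Sketch: set, compare and release two keys (error handling omitted).
HASHED_KEY a = { 0 }, b = { 0 };
hashed_key_set(&a, "REMOTE_ADDR", -1);   // -1: length computed with strlen()
hashed_key_set(&b, "REMOTE_ADDR", 11);   // explicit length
bool same = hashed_keys_match(&a, &b);   // fast path: XXH3 hash, then strcmp
hashed_key_cleanup(&a);
hashed_key_cleanup(&b);
(void)same;
```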
diff --git a/src/collectors/log2journal/log2journal-help.c b/src/collectors/log2journal/log2journal-help.c
index 23ff4c056..0cb35bb0f 100644
--- a/src/collectors/log2journal/log2journal-help.c
+++ b/src/collectors/log2journal/log2journal-help.c
@@ -10,7 +10,7 @@ static void config_dir_print_available(void) {
dir = opendir(path);
if (dir == NULL) {
- log2stderr(" >>> Cannot open directory:\n %s", path);
+ l2j_log(" >>> Cannot open directory:\n %s", path);
return;
}
diff --git a/src/collectors/log2journal/log2journal-inject.c b/src/collectors/log2journal/log2journal-inject.c
index 45158066b..f1a70ac8b 100644
--- a/src/collectors/log2journal/log2journal-inject.c
+++ b/src/collectors/log2journal/log2journal-inject.c
@@ -9,12 +9,13 @@ void injection_cleanup(INJECTION *inj) {
static inline bool log_job_injection_replace(INJECTION *inj, const char *key, size_t key_len, const char *value, size_t value_len) {
if(key_len > JOURNAL_MAX_KEY_LEN)
- log2stderr("WARNING: injection key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key);
+ l2j_log("WARNING: injection key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key);
if(value_len > JOURNAL_MAX_VALUE_LEN)
- log2stderr("WARNING: injection value of key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key);
+ l2j_log(
+ "WARNING: injection value of key '%.*s' is too long for journal. Will be truncated.", (int)key_len, key);
- hashed_key_len_set(&inj->key, key, key_len);
+ hashed_key_set(&inj->key, key, key_len);
char *v = strndupz(value, value_len);
bool ret = replace_pattern_set(&inj->value, v);
freez(v);
@@ -25,13 +26,13 @@ static inline bool log_job_injection_replace(INJECTION *inj, const char *key, si
bool log_job_injection_add(LOG_JOB *jb, const char *key, size_t key_len, const char *value, size_t value_len, bool unmatched) {
if (unmatched) {
if (jb->unmatched.injections.used >= MAX_INJECTIONS) {
- log2stderr("Error: too many unmatched injections. You can inject up to %d lines.", MAX_INJECTIONS);
+ l2j_log("Error: too many unmatched injections. You can inject up to %d lines.", MAX_INJECTIONS);
return false;
}
}
else {
if (jb->injections.used >= MAX_INJECTIONS) {
- log2stderr("Error: too many injections. You can inject up to %d lines.", MAX_INJECTIONS);
+ l2j_log("Error: too many injections. You can inject up to %d lines.", MAX_INJECTIONS);
return false;
}
}
diff --git a/src/collectors/log2journal/log2journal-params.c b/src/collectors/log2journal/log2journal-params.c
index a7bb3e263..a56d1711e 100644
--- a/src/collectors/log2journal/log2journal-params.c
+++ b/src/collectors/log2journal/log2journal-params.c
@@ -7,7 +7,7 @@
void log_job_init(LOG_JOB *jb) {
memset(jb, 0, sizeof(*jb));
simple_hashtable_init_KEY(&jb->hashtable, 32);
- hashed_key_set(&jb->line.key, "LINE");
+ hashed_key_set(&jb->line.key, "LINE", -1);
}
static void simple_hashtable_cleanup_allocated_keys(SIMPLE_HASHTABLE_KEY *ht) {
@@ -47,8 +47,14 @@ void log_job_cleanup(LOG_JOB *jb) {
for(size_t i = 0; i < jb->rewrites.used; i++)
rewrite_cleanup(&jb->rewrites.array[i]);
- txt_cleanup(&jb->rewrites.tmp);
- txt_cleanup(&jb->filename.current);
+ search_pattern_cleanup(&jb->filter.include);
+ search_pattern_cleanup(&jb->filter.exclude);
+
+ hashed_key_cleanup(&jb->filename.key);
+ hashed_key_cleanup(&jb->unmatched.key);
+
+ txt_l2j_cleanup(&jb->rewrites.tmp);
+ txt_l2j_cleanup(&jb->filename.current);
simple_hashtable_cleanup_allocated_keys(&jb->hashtable);
simple_hashtable_destroy_KEY(&jb->hashtable);
@@ -61,18 +67,18 @@ void log_job_cleanup(LOG_JOB *jb) {
bool log_job_filename_key_set(LOG_JOB *jb, const char *key, size_t key_len) {
if(!key || !*key) {
- log2stderr("filename key cannot be empty.");
+ l2j_log("filename key cannot be empty.");
return false;
}
- hashed_key_len_set(&jb->filename.key, key, key_len);
+ hashed_key_set(&jb->filename.key, key, key_len);
return true;
}
bool log_job_key_prefix_set(LOG_JOB *jb, const char *prefix, size_t prefix_len) {
if(!prefix || !*prefix) {
- log2stderr("filename key cannot be empty.");
+ l2j_log("filename key cannot be empty.");
return false;
}
@@ -86,7 +92,7 @@ bool log_job_key_prefix_set(LOG_JOB *jb, const char *prefix, size_t prefix_len)
bool log_job_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) {
if(!pattern || !*pattern) {
- log2stderr("filename key cannot be empty.");
+ l2j_log("filename key cannot be empty.");
return false;
}
@@ -100,12 +106,12 @@ bool log_job_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) {
bool log_job_include_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) {
if(jb->filter.include.re) {
- log2stderr("FILTER INCLUDE: there is already an include filter set");
+ l2j_log("FILTER INCLUDE: there is already an include filter set");
return false;
}
if(!search_pattern_set(&jb->filter.include, pattern, pattern_len)) {
- log2stderr("FILTER INCLUDE: failed: %s", jb->filter.include.error.txt);
+ l2j_log("FILTER INCLUDE: failed: %s", jb->filter.include.error.txt);
return false;
}
@@ -114,12 +120,12 @@ bool log_job_include_pattern_set(LOG_JOB *jb, const char *pattern, size_t patter
bool log_job_exclude_pattern_set(LOG_JOB *jb, const char *pattern, size_t pattern_len) {
if(jb->filter.exclude.re) {
- log2stderr("FILTER INCLUDE: there is already an exclude filter set");
+ l2j_log("FILTER INCLUDE: there is already an exclude filter set");
return false;
}
if(!search_pattern_set(&jb->filter.exclude, pattern, pattern_len)) {
- log2stderr("FILTER EXCLUDE: failed: %s", jb->filter.exclude.error.txt);
+ l2j_log("FILTER EXCLUDE: failed: %s", jb->filter.exclude.error.txt);
return false;
}
@@ -132,7 +138,7 @@ static bool parse_rename(LOG_JOB *jb, const char *param) {
// Search for '=' in param
const char *equal_sign = strchr(param, '=');
if (!equal_sign || equal_sign == param) {
- log2stderr("Error: Invalid rename format, '=' not found in %s", param);
+ l2j_log("Error: Invalid rename format, '=' not found in %s", param);
return false;
}
@@ -210,7 +216,7 @@ RW_FLAGS parse_rewrite_flags(const char *options) {
}
if(!found)
- log2stderr("Warning: rewrite options '%s' is not understood.", token);
+ l2j_log("Warning: rewrite options '%s' is not understood.", token);
// Get the next token
token = strtok(NULL, ",");
@@ -226,33 +232,33 @@ static bool parse_rewrite(LOG_JOB *jb, const char *param) {
// Search for '=' in param
const char *equal_sign = strchr(param, '=');
if (!equal_sign || equal_sign == param) {
- log2stderr("Error: Invalid rewrite format, '=' not found in %s", param);
+ l2j_log("Error: Invalid rewrite format, '=' not found in %s", param);
return false;
}
// Get the next character as the separator
char separator = *(equal_sign + 1);
if (!separator || !is_symbol(separator)) {
- log2stderr("Error: rewrite separator not found after '=', or is not one of /\\|-# in: %s", param);
+ l2j_log("Error: rewrite separator not found after '=', or is not one of /\\|-# in: %s", param);
return false;
}
// Find the next occurrence of the separator
const char *second_separator = strchr(equal_sign + 2, separator);
if (!second_separator) {
- log2stderr("Error: rewrite second separator not found in: %s", param);
+ l2j_log("Error: rewrite second separator not found in: %s", param);
return false;
}
// Check if the search pattern is empty
if (equal_sign + 1 == second_separator) {
- log2stderr("Error: rewrite search pattern is empty in: %s", param);
+ l2j_log("Error: rewrite search pattern is empty in: %s", param);
return false;
}
// Check if the replacement pattern is empty
if (*(second_separator + 1) == '\0') {
- log2stderr("Error: rewrite replacement pattern is empty in: %s", param);
+ l2j_log("Error: rewrite replacement pattern is empty in: %s", param);
return false;
}
@@ -281,7 +287,7 @@ static bool parse_rewrite(LOG_JOB *jb, const char *param) {
static bool parse_inject(LOG_JOB *jb, const char *value, bool unmatched) {
const char *equal = strchr(value, '=');
if (!equal) {
- log2stderr("Error: injection '%s' does not have an equal sign.", value);
+ l2j_log("Error: injection '%s' does not have an equal sign.", value);
return false;
}
@@ -330,7 +336,10 @@ bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) {
log_job_pattern_set(jb, arg, strlen(arg));
continue;
} else {
- log2stderr("Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'", jb->pattern, arg);
+ l2j_log(
+ "Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'",
+ jb->pattern,
+ arg);
return false;
}
}
@@ -355,7 +364,7 @@ bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) {
}
#endif
else if (strcmp(param, "--unmatched-key") == 0)
- hashed_key_set(&jb->unmatched.key, value);
+ hashed_key_set(&jb->unmatched.key, value, -1);
else if (strcmp(param, "--inject") == 0) {
if (!parse_inject(jb, value, false))
return false;
@@ -386,7 +395,10 @@ bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) {
log_job_pattern_set(jb, arg, strlen(arg));
continue;
} else {
- log2stderr("Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'", jb->pattern, arg);
+ l2j_log(
+ "Error: Multiple patterns detected. Specify only one pattern. The first is '%s', the second is '%s'",
+ jb->pattern,
+ arg);
return false;
}
}
@@ -395,7 +407,7 @@ bool log_job_command_line_parse_parameters(LOG_JOB *jb, int argc, char **argv) {
// Check if a pattern is set and exactly one pattern is specified
if (!jb->pattern) {
- log2stderr("Warning: pattern not specified. Try the default config with: -c default");
+ l2j_log("Warning: pattern not specified. Try the default config with: -c default");
log_job_command_line_help(argv[0]);
return false;
}
diff --git a/src/collectors/log2journal/log2journal-pattern.c b/src/collectors/log2journal/log2journal-pattern.c
index 4b7e9026b..158ac1129 100644
--- a/src/collectors/log2journal/log2journal-pattern.c
+++ b/src/collectors/log2journal/log2journal-pattern.c
@@ -18,13 +18,13 @@ void search_pattern_cleanup(SEARCH_PATTERN *sp) {
sp->match_data = NULL;
}
- txt_cleanup(&sp->error);
+ txt_l2j_cleanup(&sp->error);
}
static void pcre2_error_message(SEARCH_PATTERN *sp, int rc, int pos) {
char msg[1024];
pcre2_get_error_in_buffer(msg, sizeof(msg), rc, pos);
- txt_replace(&sp->error, msg, strlen(msg));
+ txt_l2j_set(&sp->error, msg, strlen(msg));
}
static inline bool compile_pcre2(SEARCH_PATTERN *sp) {
diff --git a/src/collectors/log2journal/log2journal-pcre2.c b/src/collectors/log2journal/log2journal-pcre2.c
index 185e69108..77f804cc8 100644
--- a/src/collectors/log2journal/log2journal-pcre2.c
+++ b/src/collectors/log2journal/log2journal-pcre2.c
@@ -102,8 +102,15 @@ PCRE2_STATE *pcre2_parser_create(LOG_JOB *jb) {
}
void pcre2_parser_destroy(PCRE2_STATE *pcre2) {
- if(pcre2)
+ if(pcre2) {
+ if(pcre2->re)
+ pcre2_code_free(pcre2->re);
+
+ if(pcre2->match_data)
+ pcre2_match_data_free(pcre2->match_data);
+
freez(pcre2);
+ }
}
const char *pcre2_parser_error(PCRE2_STATE *pcre2) {
diff --git a/src/collectors/log2journal/log2journal-rename.c b/src/collectors/log2journal/log2journal-rename.c
index c6975779f..11b3d2178 100644
--- a/src/collectors/log2journal/log2journal-rename.c
+++ b/src/collectors/log2journal/log2journal-rename.c
@@ -9,13 +9,13 @@ void rename_cleanup(RENAME *rn) {
bool log_job_rename_add(LOG_JOB *jb, const char *new_key, size_t new_key_len, const char *old_key, size_t old_key_len) {
if(jb->renames.used >= MAX_RENAMES) {
- log2stderr("Error: too many renames. You can rename up to %d fields.", MAX_RENAMES);
+ l2j_log("Error: too many renames. You can rename up to %d fields.", MAX_RENAMES);
return false;
}
RENAME *rn = &jb->renames.array[jb->renames.used++];
- hashed_key_len_set(&rn->new_key, new_key, new_key_len);
- hashed_key_len_set(&rn->old_key, old_key, old_key_len);
+ hashed_key_set(&rn->new_key, new_key, new_key_len);
+ hashed_key_set(&rn->old_key, old_key, old_key_len);
return true;
}
diff --git a/src/collectors/log2journal/log2journal-replace.c b/src/collectors/log2journal/log2journal-replace.c
index 7075d109d..66ba48d9f 100644
--- a/src/collectors/log2journal/log2journal-replace.c
+++ b/src/collectors/log2journal/log2journal-replace.c
@@ -26,7 +26,7 @@ static REPLACE_NODE *replace_pattern_add_node(REPLACE_NODE **head, bool is_varia
if (!new_node)
return NULL;
- hashed_key_set(&new_node->name, text);
+ hashed_key_set(&new_node->name, text, -1);
new_node->is_variable = is_variable;
new_node->next = NULL;
@@ -57,21 +57,21 @@ bool replace_pattern_set(REPLACE_PATTERN *rp, const char *pattern) {
// Start of a variable
const char *end = strchr(current, '}');
if (!end) {
- log2stderr("Error: Missing closing brace in replacement pattern: %s", rp->pattern);
+ l2j_log("Error: Missing closing brace in replacement pattern: %s", rp->pattern);
return false;
}
size_t name_length = end - current - 2; // Length of the variable name
char *variable_name = strndupz(current + 2, name_length);
if (!variable_name) {
- log2stderr("Error: Memory allocation failed for variable name.");
+ l2j_log("Error: Memory allocation failed for variable name.");
return false;
}
REPLACE_NODE *node = replace_pattern_add_node(&(rp->nodes), true, variable_name);
if (!node) {
freez(variable_name);
- log2stderr("Error: Failed to add replacement node for variable.");
+ l2j_log("Error: Failed to add replacement node for variable.");
return false;
}
freez(variable_name);
@@ -88,14 +88,14 @@ bool replace_pattern_set(REPLACE_PATTERN *rp, const char *pattern) {
size_t text_length = current - start;
char *text = strndupz(start, text_length);
if (!text) {
- log2stderr("Error: Memory allocation failed for literal text.");
+ l2j_log("Error: Memory allocation failed for literal text.");
return false;
}
REPLACE_NODE *node = replace_pattern_add_node(&(rp->nodes), false, text);
if (!node) {
freez(text);
- log2stderr("Error: Failed to add replacement node for text.");
+ l2j_log("Error: Failed to add replacement node for text.");
return false;
}
freez(text);
diff --git a/src/collectors/log2journal/log2journal-rewrite.c b/src/collectors/log2journal/log2journal-rewrite.c
index 112391bf0..0c9a8ddea 100644
--- a/src/collectors/log2journal/log2journal-rewrite.c
+++ b/src/collectors/log2journal/log2journal-rewrite.c
@@ -7,6 +7,7 @@ void rewrite_cleanup(REWRITE *rw) {
if(rw->flags & RW_MATCH_PCRE2)
search_pattern_cleanup(&rw->match_pcre2);
+
else if(rw->flags & RW_MATCH_NON_EMPTY)
replace_pattern_cleanup(&rw->match_non_empty);
@@ -16,19 +17,19 @@ void rewrite_cleanup(REWRITE *rw) {
bool log_job_rewrite_add(LOG_JOB *jb, const char *key, RW_FLAGS flags, const char *search_pattern, const char *replace_pattern) {
if(jb->rewrites.used >= MAX_REWRITES) {
- log2stderr("Error: too many rewrites. You can add up to %d rewrite rules.", MAX_REWRITES);
+ l2j_log("Error: too many rewrites. You can add up to %d rewrite rules.", MAX_REWRITES);
return false;
}
if((flags & (RW_MATCH_PCRE2|RW_MATCH_NON_EMPTY)) && (!search_pattern || !*search_pattern)) {
- log2stderr("Error: rewrite for key '%s' does not specify a search pattern.", key);
+ l2j_log("Error: rewrite for key '%s' does not specify a search pattern.", key);
return false;
}
REWRITE *rw = &jb->rewrites.array[jb->rewrites.used++];
rw->flags = flags;
- hashed_key_set(&rw->key, key);
+ hashed_key_set(&rw->key, key, -1);
if((flags & RW_MATCH_PCRE2) && !search_pattern_set(&rw->match_pcre2, search_pattern, strlen(search_pattern))) {
rewrite_cleanup(rw);
diff --git a/src/collectors/log2journal/log2journal-txt.h b/src/collectors/log2journal/log2journal-txt.h
new file mode 100644
index 000000000..f68b85a3d
--- /dev/null
+++ b/src/collectors/log2journal/log2journal-txt.h
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_LOG2JOURNAL_TXT_H
+#define NETDATA_LOG2JOURNAL_TXT_H
+
+#include "log2journal.h"
+
+// ----------------------------------------------------------------------------
+// A dynamically sized, reusable text buffer,
+// allowing us to be fast (no allocations during iterations) while having the
+// smallest possible allocations.
+
+typedef struct txt_l2j {
+ char *txt;
+ uint32_t size;
+ uint32_t len;
+} TXT_L2J;
+
+static inline void txt_l2j_cleanup(TXT_L2J *t) {
+ if(!t)
+ return;
+
+ if(t->txt)
+ freez(t->txt);
+
+ t->txt = NULL;
+ t->size = 0;
+ t->len = 0;
+}
+
+#define TXT_L2J_ALLOC_ALIGN 1024
+
+static inline size_t txt_l2j_compute_new_size(size_t old_size, size_t required_size) {
+ size_t size = (required_size % TXT_L2J_ALLOC_ALIGN == 0) ? required_size : required_size + TXT_L2J_ALLOC_ALIGN;
+ size = (size / TXT_L2J_ALLOC_ALIGN) * TXT_L2J_ALLOC_ALIGN;
+
+ if(size < old_size * 2)
+ size = old_size * 2;
+
+ return size;
+}
+
+static inline void txt_l2j_resize(TXT_L2J *dst, size_t required_size, bool keep) {
+ if(required_size <= dst->size)
+ return;
+
+ size_t new_size = txt_l2j_compute_new_size(dst->size, required_size);
+
+ if(keep && dst->txt)
+ dst->txt = reallocz(dst->txt, new_size);
+ else {
+ txt_l2j_cleanup(dst);
+ dst->txt = mallocz(new_size);
+ dst->len = 0;
+ }
+
+ dst->size = new_size;
+}
+
+static inline void txt_l2j_set(TXT_L2J *dst, const char *s, int32_t len) {
+ if(!s || !*s || len == 0) {
+ s = "";
+ len = 0;
+ }
+
+ if(len == -1)
+ len = (int32_t)strlen(s);
+
+ txt_l2j_resize(dst, len + 1, false);
+ memcpy(dst->txt, s, len);
+ dst->txt[len] = '\0';
+ dst->len = len;
+}
+
+static inline void txt_l2j_append(TXT_L2J *dst, const char *s, int32_t len) {
+ if(!dst->txt || !dst->len)
+ txt_l2j_set(dst, s, len);
+
+ else {
+ if(len == -1)
+ len = (int32_t)strlen(s);
+
+ txt_l2j_resize(dst, dst->len + len + 1, true);
+ memcpy(&dst->txt[dst->len], s, len);
+ dst->len += len;
+ dst->txt[dst->len] = '\0';
+ }
+}
+
+#endif //NETDATA_LOG2JOURNAL_TXT_H
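
The new `log2journal-txt.h` above replaces the old `TEXT` buffer that is removed from `log2journal.h` later in this patch. A minimal usage sketch (not part of the patch), assuming libnetdata's `mallocz()`/`reallocz()`/`freez()` helpers are available as the header expects:

```c
#include "log2journal-txt.h"
#include <stdio.h>

int main(void) {
    TXT_L2J buf = { 0 };                  // no allocation happens until first use

    txt_l2j_set(&buf, "MESSAGE=", -1);    // -1 asks the helper to strlen() the input
    txt_l2j_append(&buf, "connection reset", -1);

    // buf.txt is always NUL-terminated; buf.len excludes the terminator and
    // buf.size grows in TXT_L2J_ALLOC_ALIGN (1024-byte) aligned steps.
    printf("%s (%u bytes)\n", buf.txt, buf.len);

    txt_l2j_cleanup(&buf);                // frees the buffer and zeroes size/len
    return 0;
}
```
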
diff --git a/src/collectors/log2journal/log2journal-yaml.c b/src/collectors/log2journal/log2journal-yaml.c
index e73a469f5..53f83d623 100644
--- a/src/collectors/log2journal/log2journal-yaml.c
+++ b/src/collectors/log2journal/log2journal-yaml.c
@@ -280,6 +280,8 @@ static bool yaml_parse_constant_field_injection(yaml_parser_t *parser, LOG_JOB *
goto cleanup;
}
+ yaml_event_delete(&event);
+
if (!yaml_parse(parser, &event) || event.type != YAML_SCALAR_EVENT) {
yaml_error(parser, &event, "Expected scalar for constant field injection value");
goto cleanup;
@@ -315,7 +317,7 @@ static bool yaml_parse_injection_mapping(yaml_parser_t *parser, LOG_JOB *jb, boo
switch (event.type) {
case YAML_SCALAR_EVENT:
if (yaml_scalar_matches(&event, "key", strlen("key"))) {
- errors += yaml_parse_constant_field_injection(parser, jb, unmatched);
+ errors += yaml_parse_constant_field_injection(parser, jb, unmatched) ? 1 : 0;
} else {
yaml_error(parser, &event, "Unexpected scalar in injection mapping");
errors++;
@@ -396,7 +398,8 @@ static size_t yaml_parse_unmatched(yaml_parser_t *parser, LOG_JOB *jb) {
errors++;
} else {
if (sub_event.type == YAML_SCALAR_EVENT) {
- hashed_key_len_set(&jb->unmatched.key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
+ hashed_key_set(
+ &jb->unmatched.key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
} else {
yaml_error(parser, &sub_event, "expected a scalar value for 'key'");
errors++;
@@ -427,6 +430,149 @@ static size_t yaml_parse_unmatched(yaml_parser_t *parser, LOG_JOB *jb) {
return errors;
}
+static bool yaml_parse_scalar_boolean(yaml_parser_t *parser, bool def, const char *where, size_t *errors) {
+ bool rc = def;
+
+ yaml_event_t value_event;
+ if (!yaml_parse(parser, &value_event)) {
+ (*errors)++;
+ return rc;
+ }
+
+ if (value_event.type != YAML_SCALAR_EVENT) {
+ yaml_error(parser, &value_event, "Expected scalar for %s boolean", where);
+ (*errors)++;
+ }
+ else if(strncmp((char*)value_event.data.scalar.value, "yes", 3) == 0 ||
+ strncmp((char*)value_event.data.scalar.value, "true", 4) == 0)
+ rc = true;
+ else if(strncmp((char*)value_event.data.scalar.value, "no", 2) == 0 ||
+ strncmp((char*)value_event.data.scalar.value, "false", 5) == 0)
+ rc = false;
+ else {
+ yaml_error(parser, &value_event, "Expected scalar for %s boolean: invalid value %s", where, value_event.data.scalar.value);
+ rc = def;
+ }
+
+ yaml_event_delete(&value_event);
+ return rc;
+}
+
+static bool handle_rewrite_event(yaml_parser_t *parser, yaml_event_t *event,
+ char **key, char **search_pattern, char **replace_pattern,
+ RW_FLAGS *flags, bool *mapping_finished,
+ LOG_JOB *jb, size_t *errors) {
+ switch (event->type) {
+ case YAML_SCALAR_EVENT:
+ if (yaml_scalar_matches(event, "key", strlen("key"))) {
+ yaml_event_t value_event;
+ if (!yaml_parse(parser, &value_event)) {
+ (*errors)++;
+ return false;
+ }
+
+ if (value_event.type != YAML_SCALAR_EVENT) {
+ yaml_error(parser, &value_event, "Expected scalar for rewrite key");
+ (*errors)++;
+ } else {
+ freez(*key);
+ *key = strndupz((char *)value_event.data.scalar.value, value_event.data.scalar.length);
+ }
+ yaml_event_delete(&value_event);
+ }
+ else if (yaml_scalar_matches(event, "match", strlen("match"))) {
+ yaml_event_t value_event;
+ if (!yaml_parse(parser, &value_event)) {
+ (*errors)++;
+ return false;
+ }
+
+ if (value_event.type != YAML_SCALAR_EVENT) {
+ yaml_error(parser, &value_event, "Expected scalar for rewrite match PCRE2 pattern");
+ (*errors)++;
+ }
+ else {
+ freez(*search_pattern);
+ *flags |= RW_MATCH_PCRE2;
+ *flags &= ~RW_MATCH_NON_EMPTY;
+ *search_pattern = strndupz((char *)value_event.data.scalar.value, value_event.data.scalar.length);
+ }
+ yaml_event_delete(&value_event);
+ }
+ else if (yaml_scalar_matches(event, "not_empty", strlen("not_empty"))) {
+ yaml_event_t value_event;
+ if (!yaml_parse(parser, &value_event)) {
+ (*errors)++;
+ return false;
+ }
+
+ if (value_event.type != YAML_SCALAR_EVENT) {
+ yaml_error(parser, &value_event, "Expected scalar for rewrite not empty condition");
+ (*errors)++;
+ }
+ else {
+ freez(*search_pattern);
+ *flags |= RW_MATCH_NON_EMPTY;
+ *flags &= ~RW_MATCH_PCRE2;
+ *search_pattern = strndupz((char *)value_event.data.scalar.value, value_event.data.scalar.length);
+ }
+ yaml_event_delete(&value_event);
+ }
+ else if (yaml_scalar_matches(event, "value", strlen("value"))) {
+ yaml_event_t value_event;
+ if (!yaml_parse(parser, &value_event)) {
+ (*errors)++;
+ return false;
+ }
+
+ if (value_event.type != YAML_SCALAR_EVENT) {
+ yaml_error(parser, &value_event, "Expected scalar for rewrite value");
+ (*errors)++;
+ } else {
+ freez(*replace_pattern);
+ *replace_pattern = strndupz((char *)value_event.data.scalar.value, value_event.data.scalar.length);
+ }
+ yaml_event_delete(&value_event);
+ }
+ else if (yaml_scalar_matches(event, "stop", strlen("stop"))) {
+ if(yaml_parse_scalar_boolean(parser, true, "rewrite stop", errors))
+ *flags &= ~RW_DONT_STOP;
+ else
+ *flags |= RW_DONT_STOP;
+ }
+ else if (yaml_scalar_matches(event, "inject", strlen("inject"))) {
+ if(yaml_parse_scalar_boolean(parser, false, "rewrite inject", errors))
+ *flags |= RW_INJECT;
+ else
+ *flags &= ~RW_INJECT;
+ }
+ else {
+ yaml_error(parser, event, "Unexpected scalar in rewrite mapping");
+ (*errors)++;
+ }
+ break;
+
+ case YAML_MAPPING_END_EVENT:
+ if(*key) {
+ if (!log_job_rewrite_add(jb, *key, *flags, *search_pattern, *replace_pattern))
+ (*errors)++;
+ }
+
+ freez(*key);
+ freez(*search_pattern);
+ freez(*replace_pattern);
+ *mapping_finished = true;
+ break;
+
+ default:
+ yaml_error(parser, event, "Unexpected event in rewrite mapping");
+ (*errors)++;
+ break;
+ }
+
+ return true;
+}
+
static size_t yaml_parse_rewrites(yaml_parser_t *parser, LOG_JOB *jb) {
size_t errors = 0;
@@ -457,120 +603,14 @@ static size_t yaml_parse_rewrites(yaml_parser_t *parser, LOG_JOB *jb) {
continue;
}
- switch (sub_event.type) {
- case YAML_SCALAR_EVENT:
- if (yaml_scalar_matches(&sub_event, "key", strlen("key"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite key");
- errors++;
- } else {
- freez(key);
- key = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "match", strlen("match"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite match PCRE2 pattern");
- errors++;
- }
- else {
- if(search_pattern)
- freez(search_pattern);
- flags |= RW_MATCH_PCRE2;
- flags &= ~RW_MATCH_NON_EMPTY;
- search_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "not_empty", strlen("not_empty"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite not empty condition");
- errors++;
- }
- else {
- if(search_pattern)
- freez(search_pattern);
- flags |= RW_MATCH_NON_EMPTY;
- flags &= ~RW_MATCH_PCRE2;
- search_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "value", strlen("value"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite value");
- errors++;
- } else {
- freez(replace_pattern);
- replace_pattern = strndupz((char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "stop", strlen("stop"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite stop boolean");
- errors++;
- } else {
- if(strncmp((char*)sub_event.data.scalar.value, "no", 2) == 0 ||
- strncmp((char*)sub_event.data.scalar.value, "false", 5) == 0)
- flags |= RW_DONT_STOP;
- else
- flags &= ~RW_DONT_STOP;
-
- yaml_event_delete(&sub_event);
- }
- } else if (yaml_scalar_matches(&sub_event, "inject", strlen("inject"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rewrite inject boolean");
- errors++;
- } else {
- if(strncmp((char*)sub_event.data.scalar.value, "yes", 3) == 0 ||
- strncmp((char*)sub_event.data.scalar.value, "true", 4) == 0)
- flags |= RW_INJECT;
- else
- flags &= ~RW_INJECT;
-
- yaml_event_delete(&sub_event);
- }
- } else {
- yaml_error(parser, &sub_event, "Unexpected scalar in rewrite mapping");
- errors++;
- }
- break;
-
- case YAML_MAPPING_END_EVENT:
- if(key) {
- if (!log_job_rewrite_add(jb, key, flags, search_pattern, replace_pattern))
- errors++;
- }
-
- freez(key);
- key = NULL;
-
- freez(search_pattern);
- search_pattern = NULL;
-
- freez(replace_pattern);
- replace_pattern = NULL;
-
- flags = RW_NONE;
-
- mapping_finished = true;
- break;
-
- default:
- yaml_error(parser, &sub_event, "Unexpected event in rewrite mapping");
- errors++;
- break;
- }
+ handle_rewrite_event(parser, &sub_event, &key,
+ &search_pattern, &replace_pattern,
+ &flags, &mapping_finished, jb, &errors);
yaml_event_delete(&sub_event);
}
- freez(replace_pattern);
- replace_pattern = NULL;
- freez(search_pattern);
- search_pattern = NULL;
- freez(key);
- key = NULL;
- }
break;
+ }
case YAML_SEQUENCE_END_EVENT:
finished = true;
@@ -618,25 +658,36 @@ static size_t yaml_parse_renames(yaml_parser_t *parser, LOG_JOB *jb) {
switch (sub_event.type) {
case YAML_SCALAR_EVENT:
if (yaml_scalar_matches(&sub_event, "new_key", strlen("new_key"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rename new_key");
+ yaml_event_t value_event;
+
+ if (!yaml_parse(parser, &value_event) || value_event.type != YAML_SCALAR_EVENT) {
+ yaml_error(parser, &value_event, "Expected scalar for rename new_key");
errors++;
} else {
- hashed_key_len_set(&rn.new_key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
+ hashed_key_set(
+ &rn.new_key,
+ (char *)value_event.data.scalar.value,
+ value_event.data.scalar.length);
+ yaml_event_delete(&value_event);
}
} else if (yaml_scalar_matches(&sub_event, "old_key", strlen("old_key"))) {
- if (!yaml_parse(parser, &sub_event) || sub_event.type != YAML_SCALAR_EVENT) {
- yaml_error(parser, &sub_event, "Expected scalar for rename old_key");
+ yaml_event_t value_event;
+
+ if (!yaml_parse(parser, &value_event) || value_event.type != YAML_SCALAR_EVENT) {
+ yaml_error(parser, &value_event, "Expected scalar for rename old_key");
errors++;
} else {
- hashed_key_len_set(&rn.old_key, (char *)sub_event.data.scalar.value, sub_event.data.scalar.length);
- yaml_event_delete(&sub_event);
+ hashed_key_set(
+ &rn.old_key,
+ (char *)value_event.data.scalar.value,
+ value_event.data.scalar.length);
+ yaml_event_delete(&value_event);
}
} else {
yaml_error(parser, &sub_event, "Unexpected scalar in rewrite mapping");
errors++;
}
+
break;
case YAML_MAPPING_END_EVENT:
@@ -782,18 +833,22 @@ cleanup:
bool yaml_parse_file(const char *config_file_path, LOG_JOB *jb) {
if(!config_file_path || !*config_file_path) {
- log2stderr("yaml configuration filename cannot be empty.");
+ l2j_log("yaml configuration filename cannot be empty.");
return false;
}
FILE *fp = fopen(config_file_path, "r");
if (!fp) {
- log2stderr("Error opening config file: %s", config_file_path);
+ l2j_log("Error opening config file: %s", config_file_path);
return false;
}
yaml_parser_t parser;
- yaml_parser_initialize(&parser);
+ if (!yaml_parser_initialize(&parser)) {
+ fclose(fp);
+ return false;
+ }
+
yaml_parser_set_input_file(&parser, fp);
size_t errors = yaml_parse_initialized(&parser, jb);
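
The refactor above moves the event handling into `handle_rewrite_event()` and `yaml_parse_scalar_boolean()` but appears to keep the accepted YAML schema unchanged. A configuration sketch exercising the fields those handlers recognize; the top-level section names (`rewrite`, `rename`) are inferred from the function names and the values are illustrative only:

```yaml
rewrite:
  - key: PRIORITY            # handled by the "key" branch
    match: '(?i)error'       # sets RW_MATCH_PCRE2, clears RW_MATCH_NON_EMPTY
    value: '3'
    stop: yes                # the default; "no"/"false" sets RW_DONT_STOP
    inject: no               # "yes"/"true" sets RW_INJECT

  - key: MESSAGE
    not_empty: '${MESSAGE}'  # sets RW_MATCH_NON_EMPTY instead of a PCRE2 match

rename:
  - new_key: MESSAGE
    old_key: MSG
```
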
diff --git a/src/collectors/log2journal/log2journal.c b/src/collectors/log2journal/log2journal.c
index 0fbba0b0c..769547bc1 100644
--- a/src/collectors/log2journal/log2journal.c
+++ b/src/collectors/log2journal/log2journal.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "log2journal.h"
+#include "libnetdata/required_dummies.h"
// ----------------------------------------------------------------------------
@@ -73,10 +74,13 @@ static inline HASHED_KEY *get_key_from_hashtable(LOG_JOB *jb, HASHED_KEY *k) {
ht_key->flags |= HK_COLLISION_CHECKED;
if(strcmp(ht_key->key, k->key) != 0)
- log2stderr("Hashtable collision detected on key '%s' (hash %lx) and '%s' (hash %lx). "
- "Please file a bug report.", ht_key->key, (unsigned long) ht_key->hash, k->key
- , (unsigned long) k->hash
- );
+ l2j_log(
+ "Hashtable collision detected on key '%s' (hash %lx) and '%s' (hash %lx). "
+ "Please file a bug report.",
+ ht_key->key,
+ (unsigned long)ht_key->hash,
+ k->key,
+ (unsigned long)k->hash);
}
}
else {
@@ -97,8 +101,9 @@ static inline HASHED_KEY *get_key_from_hashtable(LOG_JOB *jb, HASHED_KEY *k) {
static inline HASHED_KEY *get_key_from_hashtable_with_char_ptr(LOG_JOB *jb, const char *key) {
HASHED_KEY find = {
- .key = key,
- .len = strlen(key),
+ .flags = HK_NONE,
+ .key = key,
+ .len = strlen(key),
};
find.hash = XXH3_64bits(key, find.len);
@@ -109,24 +114,29 @@ static inline HASHED_KEY *get_key_from_hashtable_with_char_ptr(LOG_JOB *jb, cons
static inline void validate_key(LOG_JOB *jb __maybe_unused, HASHED_KEY *k) {
if(k->len > JOURNAL_MAX_KEY_LEN)
- log2stderr("WARNING: key '%s' has length %zu, which is more than %zu, the max systemd-journal allows",
- k->key, (size_t)k->len, (size_t)JOURNAL_MAX_KEY_LEN);
+ l2j_log(
+ "WARNING: key '%s' has length %zu, which is more than %zu, the max systemd-journal allows",
+ k->key,
+ (size_t)k->len,
+ (size_t)JOURNAL_MAX_KEY_LEN);
for(size_t i = 0; i < k->len ;i++) {
char c = k->key[i];
if((c < 'A' || c > 'Z') && !isdigit(c) && c != '_') {
- log2stderr("WARNING: key '%s' contains characters that are not allowed by systemd-journal.", k->key);
+ l2j_log("WARNING: key '%s' contains characters that are not allowed by systemd-journal.", k->key);
break;
}
}
if(isdigit(k->key[0]))
- log2stderr("WARNING: key '%s' starts with a digit and may not be accepted by systemd-journal.", k->key);
+ l2j_log("WARNING: key '%s' starts with a digit and may not be accepted by systemd-journal.", k->key);
if(k->key[0] == '_')
- log2stderr("WARNING: key '%s' starts with an underscore, which makes it a systemd-journal trusted field. "
- "Such fields are accepted by systemd-journal-remote, but not by systemd-journald.", k->key);
+ l2j_log(
+ "WARNING: key '%s' starts with an underscore, which makes it a systemd-journal trusted field. "
+ "Such fields are accepted by systemd-journal-remote, but not by systemd-journald.",
+ k->key);
}
// ----------------------------------------------------------------------------
@@ -170,16 +180,16 @@ static inline void replace_evaluate(LOG_JOB *jb, HASHED_KEY *k, REPLACE_PATTERN
for(REPLACE_NODE *node = rp->nodes; node != NULL; node = node->next) {
if(node->is_variable) {
if(hashed_keys_match(&node->name, &jb->line.key))
- txt_expand_and_append(&ht_key->value, jb->line.trimmed, jb->line.trimmed_len);
+ txt_l2j_append(&ht_key->value, jb->line.trimmed, jb->line.trimmed_len);
else {
HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key);
if(ktmp->value.len)
- txt_expand_and_append(&ht_key->value, ktmp->value.txt, ktmp->value.len);
+ txt_l2j_append(&ht_key->value, ktmp->value.txt, ktmp->value.len);
}
}
else
- txt_expand_and_append(&ht_key->value, node->name.key, node->name.len);
+ txt_l2j_append(&ht_key->value, node->name.key, node->name.len);
}
}
@@ -202,26 +212,26 @@ static inline void replace_evaluate_from_pcre2(LOG_JOB *jb, HASHED_KEY *k, REPLA
PCRE2_SIZE end_offset = ovector[2 * group_number + 1];
PCRE2_SIZE length = end_offset - start_offset;
- txt_expand_and_append(&jb->rewrites.tmp, k->value.txt + start_offset, length);
+ txt_l2j_append(&jb->rewrites.tmp, k->value.txt + start_offset, length);
}
else {
if(hashed_keys_match(&node->name, &jb->line.key))
- txt_expand_and_append(&jb->rewrites.tmp, jb->line.trimmed, jb->line.trimmed_len);
+ txt_l2j_append(&jb->rewrites.tmp, jb->line.trimmed, jb->line.trimmed_len);
else {
HASHED_KEY *ktmp = get_key_from_hashtable_with_char_ptr(jb, node->name.key);
if(ktmp->value.len)
- txt_expand_and_append(&jb->rewrites.tmp, ktmp->value.txt, ktmp->value.len);
+ txt_l2j_append(&jb->rewrites.tmp, ktmp->value.txt, ktmp->value.len);
}
}
}
else {
- txt_expand_and_append(&jb->rewrites.tmp, node->name.key, node->name.len);
+ txt_l2j_append(&jb->rewrites.tmp, node->name.key, node->name.len);
}
}
// swap the values of the temporary TEXT and the key value
- TEXT tmp = k->value;
+ TXT_L2J tmp = k->value;
k->value = jb->rewrites.tmp;
jb->rewrites.tmp = tmp;
}
@@ -271,7 +281,7 @@ static inline HASHED_KEY *rename_key(LOG_JOB *jb, HASHED_KEY *k) {
static inline void send_key_value_constant(LOG_JOB *jb __maybe_unused, HASHED_KEY *key, const char *value, size_t len) {
HASHED_KEY *ht_key = get_key_from_hashtable(jb, key);
- txt_replace(&ht_key->value, value, len);
+ txt_l2j_set(&ht_key->value, value, len);
ht_key->flags |= HK_VALUE_FROM_LOG;
// fprintf(stderr, "SET %s=%.*s\n", ht_key->key, (int)ht_key->value.len, ht_key->value.txt);
@@ -292,7 +302,7 @@ static inline void send_key_value_error(LOG_JOB *jb, HASHED_KEY *key, const char
inline void log_job_send_extracted_key_value(LOG_JOB *jb, const char *key, const char *value, size_t len) {
HASHED_KEY *ht_key = get_key_from_hashtable_with_char_ptr(jb, key);
HASHED_KEY *nk = rename_key(jb, ht_key);
- txt_replace(&nk->value, value, len);
+ txt_l2j_set(&nk->value, value, len);
ht_key->flags |= HK_VALUE_FROM_LOG;
// fprintf(stderr, "SET %s=%.*s\n", ht_key->key, (int)ht_key->value.len, ht_key->value.txt);
@@ -417,7 +427,7 @@ static inline bool jb_switched_filename(LOG_JOB *jb, const char *line, size_t le
const char *end = strstr(line, " <==");
while (*start == ' ') start++;
if (*start != '\n' && *start != '\0' && end) {
- txt_replace(&jb->filename.current, start, end - start);
+ txt_l2j_set(&jb->filename.current, start, end - start);
return true;
}
}
@@ -486,7 +496,7 @@ int log_job_run(LOG_JOB *jb) {
else if(strcmp(jb->pattern, "none") != 0) {
pcre2 = pcre2_parser_create(jb);
if(pcre2_has_error(pcre2)) {
- log2stderr("%s", pcre2_parser_error(pcre2));
+ l2j_log("%s", pcre2_parser_error(pcre2));
pcre2_parser_destroy(pcre2);
return 1;
}
@@ -515,11 +525,11 @@ int log_job_run(LOG_JOB *jb) {
if(!line_is_matched) {
if(json)
- log2stderr("%s", json_parser_error(json));
+ l2j_log("%s", json_parser_error(json));
else if(logfmt)
- log2stderr("%s", logfmt_parser_error(logfmt));
+ l2j_log("%s", logfmt_parser_error(logfmt));
else if(pcre2)
- log2stderr("%s", pcre2_parser_error(pcre2));
+ l2j_log("%s", pcre2_parser_error(pcre2));
if(!jb_send_unmatched_line(jb, line))
// just logging to stderr, not sending unmatched lines
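
For readers following the `validate_key()` hunks above: the warnings correspond to systemd-journal field-name rules. Derived only from the checks shown (illustrative):

```
HTTP_STATUS        accepted (A-Z, digits, underscore)
2XX_RESPONSES      warning: starts with a digit
_SOURCE_REALTIME   warning: leading underscore marks a trusted field (journald may reject it)
http-status        warning: lowercase letters and '-' are not allowed characters
```
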
diff --git a/src/collectors/log2journal/log2journal.h b/src/collectors/log2journal/log2journal.h
index 5bdf7276b..480c0598c 100644
--- a/src/collectors/log2journal/log2journal.h
+++ b/src/collectors/log2journal/log2journal.h
@@ -3,49 +3,16 @@
#ifndef NETDATA_LOG2JOURNAL_H
#define NETDATA_LOG2JOURNAL_H
-// only for PACKAGE_VERSION
-#include <config.h>
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <dirent.h>
-#include <string.h>
-#include <stdbool.h>
-#include <string.h>
-#include <ctype.h>
-#include <math.h>
-#include <stdarg.h>
-#include <assert.h>
-
-// ----------------------------------------------------------------------------
-// compatibility
-
-#ifndef HAVE_STRNDUP
-// strndup() is not available on Windows
-static inline char *os_strndup( const char *s1, size_t n)
-{
- char *copy= (char*)malloc( n+1 );
- memcpy( copy, s1, n );
- copy[n] = 0;
- return copy;
-};
-#define strndup(s, n) os_strndup(s, n)
-#endif
-
-#if defined(HAVE_FUNC_ATTRIBUTE_FORMAT_GNU_PRINTF)
-#define PRINTFLIKE(f, a) __attribute__ ((format(gnu_printf, f, a)))
-#elif defined(HAVE_FUNC_ATTRIBUTE_FORMAT_PRINTF)
-#define PRINTFLIKE(f, a) __attribute__ ((format(printf, f, a)))
-#else
-#define PRINTFLIKE(f, a)
-#endif
+#include "libnetdata/libnetdata.h"
+#include "log2journal-txt.h"
+#include "log2journal-hashed-key.h"
// ----------------------------------------------------------------------------
// logging
// enable the compiler to check for printf like errors on our log2stderr() function
-static inline void log2stderr(const char *format, ...) PRINTFLIKE(1, 2);
-static inline void log2stderr(const char *format, ...) {
+static inline void l2j_log(const char *format, ...) PRINTFLIKE(1, 2);
+static inline void l2j_log(const char *format, ...) {
va_list args;
va_start(args, format);
vfprintf(stderr, format, args);
@@ -54,62 +21,6 @@ static inline void log2stderr(const char *format, ...) {
}
// ----------------------------------------------------------------------------
-// allocation functions abstraction
-
-static inline void *mallocz(size_t size) {
- void *ptr = malloc(size);
- if (!ptr) {
- log2stderr("Fatal Error: Memory allocation failed. Requested size: %zu bytes.", size);
- exit(EXIT_FAILURE);
- }
- return ptr;
-}
-
-static inline void *callocz(size_t elements, size_t size) {
- void *ptr = calloc(elements, size);
- if (!ptr) {
- log2stderr("Fatal Error: Memory allocation failed. Requested size: %zu bytes.", elements * size);
- exit(EXIT_FAILURE);
- }
- return ptr;
-}
-
-static inline void *reallocz(void *ptr, size_t size) {
- void *new_ptr = realloc(ptr, size);
- if (!new_ptr) {
- log2stderr("Fatal Error: Memory reallocation failed. Requested size: %zu bytes.", size);
- exit(EXIT_FAILURE);
- }
- return new_ptr;
-}
-
-static inline char *strdupz(const char *s) {
- char *ptr = strdup(s);
- if (!ptr) {
- log2stderr("Fatal Error: Memory allocation failed in strdup.");
- exit(EXIT_FAILURE);
- }
- return ptr;
-}
-
-static inline char *strndupz(const char *s, size_t n) {
- char *ptr = strndup(s, n);
- if (!ptr) {
- log2stderr("Fatal Error: Memory allocation failed in strndup. Requested size: %zu bytes.", n);
- exit(EXIT_FAILURE);
- }
- return ptr;
-}
-
-static inline void freez(void *ptr) {
- if (ptr)
- free(ptr);
-}
-
-// ----------------------------------------------------------------------------
-
-#define XXH_INLINE_ALL
-#include "libnetdata/xxhash.h"
#define PCRE2_CODE_UNIT_WIDTH 8
#include <pcre2.h>
@@ -121,15 +32,12 @@ static inline void freez(void *ptr) {
// ----------------------------------------------------------------------------
// hashtable for HASHED_KEY
-// cleanup hashtable defines
-#include "libnetdata/simple_hashtable_undef.h"
-
struct hashed_key;
static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2);
#define SIMPLE_HASHTABLE_SORT_FUNCTION compare_keys
-#define SIMPLE_HASHTABLE_VALUE_TYPE struct hashed_key
+#define SIMPLE_HASHTABLE_VALUE_TYPE HASHED_KEY
#define SIMPLE_HASHTABLE_NAME _KEY
-#include "libnetdata/simple_hashtable.h"
+#include "libnetdata/simple_hashtable/simple_hashtable.h"
// ----------------------------------------------------------------------------
@@ -173,151 +81,12 @@ static inline size_t copy_to_buffer(char *dst, size_t dst_size, const char *src,
}
// ----------------------------------------------------------------------------
-// A dynamically sized, reusable text buffer,
-// allowing us to be fast (no allocations during iterations) while having the
-// smallest possible allocations.
-
-typedef struct txt {
- char *txt;
- uint32_t size;
- uint32_t len;
-} TEXT;
-
-static inline void txt_cleanup(TEXT *t) {
- if(!t)
- return;
-
- if(t->txt)
- freez(t->txt);
-
- t->txt = NULL;
- t->size = 0;
- t->len = 0;
-}
-
-static inline void txt_replace(TEXT *t, const char *s, size_t len) {
- if(!s || !*s || len == 0) {
- s = "";
- len = 0;
- }
-
- if(len + 1 <= t->size) {
- // the existing value allocation, fits our value
-
- memcpy(t->txt, s, len);
- t->txt[len] = '\0';
- t->len = len;
- }
- else {
- // no existing value allocation, or too small for our value
- // cleanup and increase the buffer
-
- txt_cleanup(t);
-
- t->txt = strndupz(s, len);
- t->size = len + 1;
- t->len = len;
- }
-}
-
-static inline void txt_expand_and_append(TEXT *t, const char *s, size_t len) {
- if(len + 1 > (t->size - t->len)) {
- size_t new_size = t->len + len + 1;
- if(new_size < t->size * 2)
- new_size = t->size * 2;
-
- t->txt = reallocz(t->txt, new_size);
- t->size = new_size;
- }
-
- char *copy_to = &t->txt[t->len];
- memcpy(copy_to, s, len);
- copy_to[len] = '\0';
- t->len += len;
-}
-
-// ----------------------------------------------------------------------------
-
-typedef enum __attribute__((__packed__)) {
- HK_NONE = 0,
-
- // permanent flags - they are set once to optimize various decisions and lookups
-
- HK_HASHTABLE_ALLOCATED = (1 << 0), // this is key object allocated in the hashtable
- // objects that do not have this, have a pointer to a key in the hashtable
- // objects that have this, value a value allocated
-
- HK_FILTERED = (1 << 1), // we checked once if this key in filtered
- HK_FILTERED_INCLUDED = (1 << 2), // the result of the filtering was to include it in the output
-
- HK_COLLISION_CHECKED = (1 << 3), // we checked once for collision check of this key
-
- HK_RENAMES_CHECKED = (1 << 4), // we checked once if there are renames on this key
- HK_HAS_RENAMES = (1 << 5), // and we found there is a rename rule related to it
-
- // ephemeral flags - they are unset at the end of each log line
-
- HK_VALUE_FROM_LOG = (1 << 14), // the value of this key has been read from the log (or from injection, duplication)
- HK_VALUE_REWRITTEN = (1 << 15), // the value of this key has been rewritten due to one of our rewrite rules
-
-} HASHED_KEY_FLAGS;
-
-typedef struct hashed_key {
- const char *key;
- uint32_t len;
- HASHED_KEY_FLAGS flags;
- XXH64_hash_t hash;
- union {
- struct hashed_key *hashtable_ptr; // HK_HASHTABLE_ALLOCATED is not set
- TEXT value; // HK_HASHTABLE_ALLOCATED is set
- };
-} HASHED_KEY;
-
-static inline void hashed_key_cleanup(HASHED_KEY *k) {
- if(k->key) {
- freez((void *)k->key);
- k->key = NULL;
- }
-
- if(k->flags & HK_HASHTABLE_ALLOCATED)
- txt_cleanup(&k->value);
- else
- k->hashtable_ptr = NULL;
-}
-
-static inline void hashed_key_set(HASHED_KEY *k, const char *name) {
- hashed_key_cleanup(k);
-
- k->key = strdupz(name);
- k->len = strlen(k->key);
- k->hash = XXH3_64bits(k->key, k->len);
- k->flags = HK_NONE;
-}
-
-static inline void hashed_key_len_set(HASHED_KEY *k, const char *name, size_t len) {
- hashed_key_cleanup(k);
-
- k->key = strndupz(name, len);
- k->len = len;
- k->hash = XXH3_64bits(k->key, k->len);
- k->flags = HK_NONE;
-}
-
-static inline bool hashed_keys_match(HASHED_KEY *k1, HASHED_KEY *k2) {
- return ((k1 == k2) || (k1->hash == k2->hash && strcmp(k1->key, k2->key) == 0));
-}
-
-static inline int compare_keys(struct hashed_key *k1, struct hashed_key *k2) {
- return strcmp(k1->key, k2->key);
-}
-
-// ----------------------------------------------------------------------------
typedef struct search_pattern {
const char *pattern;
pcre2_code *re;
pcre2_match_data *match_data;
- TEXT error;
+ TXT_L2J error;
} SEARCH_PATTERN;
void search_pattern_cleanup(SEARCH_PATTERN *sp);
@@ -416,7 +185,7 @@ typedef struct log_job {
struct {
bool last_line_was_empty;
HASHED_KEY key;
- TEXT current;
+ TXT_L2J current;
} filename;
struct {
@@ -435,7 +204,7 @@ typedef struct log_job {
struct {
uint32_t used;
REWRITE array[MAX_REWRITES];
- TEXT tmp;
+ TXT_L2J tmp;
} rewrites;
struct {
diff --git a/src/collectors/macos.plugin/integrations/macos.md b/src/collectors/macos.plugin/integrations/macos.md
index 9445b9a61..6b9e3f239 100644
--- a/src/collectors/macos.plugin/integrations/macos.md
+++ b/src/collectors/macos.plugin/integrations/macos.md
@@ -188,8 +188,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/macos.plugin/macos_fw.c b/src/collectors/macos.plugin/macos_fw.c
index 75ef386b9..a97c5bc25 100644
--- a/src/collectors/macos.plugin/macos_fw.c
+++ b/src/collectors/macos.plugin/macos_fw.c
@@ -199,7 +199,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -235,7 +234,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_AREA
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "utilization", NULL, 1, 10000000, RRD_ALGORITHM_INCREMENTAL);
}
@@ -270,7 +268,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_INCREMENTAL);
@@ -302,7 +299,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "reads", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(st, "writes", NULL, -1, 1000000, RRD_ALGORITHM_ABSOLUTE);
@@ -330,7 +326,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_AREA
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "reads", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(st, "writes", NULL, -1, 1024, RRD_ALGORITHM_ABSOLUTE);
@@ -358,7 +353,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "svctm", NULL, 1, 1000000, RRD_ALGORITHM_ABSOLUTE);
}
@@ -549,7 +543,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -579,7 +572,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -605,7 +597,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
@@ -629,7 +620,6 @@ int do_macos_iokit(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/macos.plugin/macos_mach_smi.c b/src/collectors/macos.plugin/macos_mach_smi.c
index 30c957187..fa88a5b7c 100644
--- a/src/collectors/macos.plugin/macos_mach_smi.c
+++ b/src/collectors/macos.plugin/macos_mach_smi.c
@@ -192,7 +192,6 @@ int do_macos_mach_smi(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "memory", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "cow", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/macos.plugin/macos_sysctl.c b/src/collectors/macos.plugin/macos_sysctl.c
index 825125365..83e941df4 100644
--- a/src/collectors/macos.plugin/macos_sysctl.c
+++ b/src/collectors/macos.plugin/macos_sysctl.c
@@ -278,7 +278,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "free", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE);
rrddim_add(st, "used", NULL, 1, 1048576, RRD_ALGORITHM_ABSOLUTE);
@@ -411,7 +410,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -441,7 +439,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "ActiveOpens", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -564,7 +561,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -631,7 +627,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "InErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -819,7 +814,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -849,7 +843,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -879,7 +872,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -963,7 +955,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -994,7 +985,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "ok", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1027,7 +1017,6 @@ int do_macos_sysctl(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/macos.plugin/plugin_macos.c b/src/collectors/macos.plugin/plugin_macos.c
index 0d651ae69..6f5b892d8 100644
--- a/src/collectors/macos.plugin/plugin_macos.c
+++ b/src/collectors/macos.plugin/plugin_macos.c
@@ -54,13 +54,12 @@ void *macos_main(void *ptr)
worker_register_job_name(i, macos_modules[i].dim);
}
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC);
while(service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t hb_dt = heartbeat_next(&hb);
if (!service_running(SERVICE_COLLECTORS))
break;
diff --git a/src/collectors/network-viewer.plugin/network-viewer.c b/src/collectors/network-viewer.plugin/network-viewer.c
index 06dde7382..c0ea8af5e 100644
--- a/src/collectors/network-viewer.plugin/network-viewer.c
+++ b/src/collectors/network-viewer.plugin/network-viewer.c
@@ -23,20 +23,18 @@ static SPAWN_SERVER *spawn_srv = NULL;
} aggregated_key; \
} network_viewer;
-#include "libnetdata/maps/local-sockets.h"
-#include "libnetdata/maps/system-users.h"
-#include "libnetdata/maps/system-services.h"
+#include "libnetdata/local-sockets/local-sockets.h"
+#include "libnetdata/os/system-maps/system-services.h"
#define NETWORK_CONNECTIONS_VIEWER_FUNCTION "network-connections"
#define NETWORK_CONNECTIONS_VIEWER_HELP "Network connections explorer"
#define SIMPLE_HASHTABLE_VALUE_TYPE LOCAL_SOCKET
#define SIMPLE_HASHTABLE_NAME _AGGREGATED_SOCKETS
-#include "libnetdata/simple_hashtable.h"
+#include "libnetdata/simple_hashtable/simple_hashtable.h"
netdata_mutex_t stdout_mutex = NETDATA_MUTEX_INITIALIZER;
static bool plugin_should_exit = false;
-static USERNAMES_CACHE *uc;
static SERVICENAMES_CACHE *sc;
ENUM_STR_MAP_DEFINE(SOCKET_DIRECTION) = {
@@ -80,7 +78,7 @@ struct sockets_stats {
} max;
};
-static void local_socket_to_json_array(struct sockets_stats *st, LOCAL_SOCKET *n, uint64_t proc_self_net_ns_inode, bool aggregated) {
+static void local_socket_to_json_array(struct sockets_stats *st, const LOCAL_SOCKET *n, uint64_t proc_self_net_ns_inode, bool aggregated) {
if(n->direction == SOCKET_DIRECTION_NONE)
return;
@@ -151,12 +149,12 @@ static void local_socket_to_json_array(struct sockets_stats *st, LOCAL_SOCKET *n
}
else {
// buffer_json_add_array_item_uint64(wb, n->uid);
- STRING *u = system_usernames_cache_lookup_uid(uc, n->uid);
- buffer_json_add_array_item_string(wb, string2str(u));
- string_freez(u);
+ CACHED_USERNAME cu = cached_username_get_by_uid(n->uid);
+ buffer_json_add_array_item_string(wb, string2str(cu.username));
+ cached_username_release(cu);
}
- struct socket_endpoint *server_endpoint;
+ const struct socket_endpoint *server_endpoint;
const char *server_address;
const char *client_address_space;
const char *server_address_space;
@@ -240,7 +238,9 @@ static void local_socket_to_json_array(struct sockets_stats *st, LOCAL_SOCKET *n
buffer_json_array_close(wb);
}
-static void populate_aggregated_key(LOCAL_SOCKET *n) {
+static void populate_aggregated_key(const LOCAL_SOCKET *nn) {
+ LOCAL_SOCKET *n = (LOCAL_SOCKET *)nn;
+
n->network_viewer.count = 1;
n->network_viewer.aggregated_key.pid = n->pid;
@@ -269,7 +269,7 @@ static void populate_aggregated_key(LOCAL_SOCKET *n) {
n->network_viewer.aggregated_key.remote_address_space = local_sockets_address_space(&n->remote);
}
-static void local_sockets_cb_to_json(LS_STATE *ls, LOCAL_SOCKET *n, void *data) {
+static void local_sockets_cb_to_json(LS_STATE *ls, const LOCAL_SOCKET *n, void *data) {
struct sockets_stats *st = data;
populate_aggregated_key(n);
local_socket_to_json_array(st, n, ls->proc_self_net_ns_inode, false);
@@ -280,12 +280,12 @@ static void local_sockets_cb_to_json(LS_STATE *ls, LOCAL_SOCKET *n, void *data)
#define SUM_THEM_ALL(a, b) (a) += (b)
#define OR_THEM_ALL(a, b) (a) |= (b)
-static void local_sockets_cb_to_aggregation(LS_STATE *ls __maybe_unused, LOCAL_SOCKET *n, void *data) {
+static void local_sockets_cb_to_aggregation(LS_STATE *ls __maybe_unused, const LOCAL_SOCKET *n, void *data) {
SIMPLE_HASHTABLE_AGGREGATED_SOCKETS *ht = data;
populate_aggregated_key(n);
XXH64_hash_t hash = XXH3_64bits(&n->network_viewer.aggregated_key, sizeof(n->network_viewer.aggregated_key));
- SIMPLE_HASHTABLE_SLOT_AGGREGATED_SOCKETS *sl = simple_hashtable_get_slot_AGGREGATED_SOCKETS(ht, hash, n, true);
+ SIMPLE_HASHTABLE_SLOT_AGGREGATED_SOCKETS *sl = simple_hashtable_get_slot_AGGREGATED_SOCKETS(ht, hash, (LOCAL_SOCKET *)n, true);
LOCAL_SOCKET *t = SIMPLE_HASHTABLE_SLOT_DATA(sl);
if(t) {
t->network_viewer.count++;
@@ -464,7 +464,7 @@ void network_viewer_function(const char *transaction, char *function __maybe_unu
char function_copy[strlen(function) + 1];
memcpy(function_copy, function, sizeof(function_copy));
char *words[1024];
- size_t num_words = quoted_strings_splitter_pluginsd(function_copy, words, 1024);
+ size_t num_words = quoted_strings_splitter_whitespace(function_copy, words, 1024);
for(size_t i = 1; i < num_words ;i++) {
char *param = get_word(words, num_words, i);
if(strcmp(param, "sockets:aggregated") == 0) {
@@ -511,7 +511,9 @@ void network_viewer_function(const char *transaction, char *function __maybe_unu
.max_errors = 10,
.max_concurrent_namespaces = 5,
},
+#if defined(LOCAL_SOCKETS_USE_SETNS)
.spawn_server = spawn_srv,
+#endif
.stats = { 0 },
.sockets_hashtable = { 0 },
.local_ips_hashtable = { 0 },
@@ -945,7 +947,10 @@ close_and_send:
buffer_json_finalize(wb);
netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_s + 1, wb);
+ wb->response_code = HTTP_RESP_OK;
+ wb->content_type = CT_APPLICATION_JSON;
+ wb->expires = now_s + 1;
+ pluginsd_function_result_to_stdout(transaction, wb);
netdata_mutex_unlock(&stdout_mutex);
}
@@ -953,20 +958,22 @@ close_and_send:
// main
int main(int argc __maybe_unused, char **argv __maybe_unused) {
- clocks_init();
nd_thread_tag_set("NETWORK-VIEWER");
nd_log_initialize_for_external_plugins("network-viewer.plugin");
netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
if(verify_netdata_host_prefix(true) == -1) exit(1);
+#if defined(LOCAL_SOCKETS_USE_SETNS)
spawn_srv = spawn_server_create(SPAWN_SERVER_OPTION_CALLBACK, "setns", local_sockets_spawn_server_callback, argc, (const char **)argv);
if(spawn_srv == NULL) {
fprintf(stderr, "Cannot create spawn server.\n");
exit(1);
}
+#endif
- uc = system_usernames_cache_init();
+ cached_usernames_init();
+ update_cached_host_users();
sc = system_servicenames_cache_init();
// ----------------------------------------------------------------------------------------------------------------
@@ -1008,15 +1015,14 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) {
// ----------------------------------------------------------------------------------------------------------------
- usec_t step_ut = 100 * USEC_PER_MS;
usec_t send_newline_ut = 0;
bool tty = isatty(fileno(stdout)) == 1;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
while(!plugin_should_exit) {
- usec_t dt_ut = heartbeat_next(&hb, step_ut);
+ usec_t dt_ut = heartbeat_next(&hb);
send_newline_ut += dt_ut;
if(!tty && send_newline_ut > USEC_PER_SEC) {
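
The network-viewer changes above also replace the plugin-local `USERNAMES_CACHE` with the shared cached-username helpers. Condensed from the hunks (a fragment, not a standalone program; `wb`, `n` and the libnetdata headers come from the surrounding plugin code):

```c
// before this patch:
//   STRING *u = system_usernames_cache_lookup_uid(uc, n->uid);
//   buffer_json_add_array_item_string(wb, string2str(u));
//   string_freez(u);

// once, at plugin startup:
cached_usernames_init();
update_cached_host_users();

// per socket, when rendering the row:
CACHED_USERNAME cu = cached_username_get_by_uid(n->uid);
buffer_json_add_array_item_string(wb, string2str(cu.username));
cached_username_release(cu);
```
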
diff --git a/src/collectors/nfacct.plugin/integrations/netfilter.md b/src/collectors/nfacct.plugin/integrations/netfilter.md
index b8dcb8520..f8ba4ef74 100644
--- a/src/collectors/nfacct.plugin/integrations/netfilter.md
+++ b/src/collectors/nfacct.plugin/integrations/netfilter.md
@@ -106,8 +106,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/nfacct.plugin/plugin_nfacct.c b/src/collectors/nfacct.plugin/plugin_nfacct.c
index 92c82351a..6225ec4a6 100644
--- a/src/collectors/nfacct.plugin/plugin_nfacct.c
+++ b/src/collectors/nfacct.plugin/plugin_nfacct.c
@@ -747,7 +747,6 @@ void nfacct_signals()
}
int main(int argc, char **argv) {
- clocks_init();
nd_log_initialize_for_external_plugins("nfacct.plugin");
// ------------------------------------------------------------------------
@@ -832,12 +831,11 @@ int main(int argc, char **argv) {
time_t started_t = now_monotonic_sec();
size_t iteration;
- usec_t step = netdata_update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, netdata_update_every * USEC_PER_SEC);
for(iteration = 0; 1; iteration++) {
- usec_t dt = heartbeat_next(&hb, step);
+ usec_t dt = heartbeat_next(&hb);
if(unlikely(netdata_exit)) break;
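
The heartbeat change in `plugin_nfacct.c` above recurs throughout this patch (`macos.plugin`, `network-viewer.plugin`, `perf.plugin`): the step now goes into `heartbeat_init()` and `heartbeat_next()` loses its second argument. A minimal sketch of the new loop shape, assuming the usual external-plugin scaffolding:

```c
// Condensed from the hunks above; update_every, netdata_exit and the
// libnetdata headers are assumed from the surrounding plugin code.
static void main_loop_sketch(int update_every) {
    heartbeat_t hb;

    // before this patch:
    //   usec_t step = update_every * USEC_PER_SEC;
    //   heartbeat_init(&hb);
    //   usec_t dt = heartbeat_next(&hb, step);

    // after this patch:
    heartbeat_init(&hb, update_every * USEC_PER_SEC);

    while (!netdata_exit) {
        usec_t dt = heartbeat_next(&hb);  // sleeps until the next tick, returns elapsed usec
        (void)dt;
        // ... collect and send metrics ...
    }
}
```
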
diff --git a/src/collectors/perf.plugin/integrations/cpu_performance.md b/src/collectors/perf.plugin/integrations/cpu_performance.md
index c24a14a99..0db211167 100644
--- a/src/collectors/perf.plugin/integrations/cpu_performance.md
+++ b/src/collectors/perf.plugin/integrations/cpu_performance.md
@@ -97,21 +97,21 @@ There are no alerts configured by default for this integration.
#### Install perf plugin
-If you are [using our official native DEB/RPM packages](/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.
+If you are [using our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md#determine-which-installation-method-you-used), make sure the `netdata-plugin-perf` package is installed.
#### Enable the perf plugin
The plugin is disabled by default because the number of PMUs is usually quite limited and it is not desired to allow Netdata to struggle silently for PMUs, interfering with other performance monitoring software.
-To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.
+To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `netdata.conf` file.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
sudo ./edit-config netdata.conf
```
-Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
+Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your system.
@@ -132,8 +132,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/perf.plugin/metadata.yaml b/src/collectors/perf.plugin/metadata.yaml
index 18841d53a..d72be7f5d 100644
--- a/src/collectors/perf.plugin/metadata.yaml
+++ b/src/collectors/perf.plugin/metadata.yaml
@@ -55,7 +55,7 @@ modules:
sudo ./edit-config netdata.conf
```
- Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
+ Change the value of the `perf` setting to `yes` in the `[plugins]` section. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system.
configuration:
file:
name: "netdata.conf"
diff --git a/src/collectors/perf.plugin/perf_plugin.c b/src/collectors/perf.plugin/perf_plugin.c
index 8fb4014e4..ccc7016e2 100644
--- a/src/collectors/perf.plugin/perf_plugin.c
+++ b/src/collectors/perf.plugin/perf_plugin.c
@@ -240,7 +240,7 @@ static struct perf_event {
{EV_ID_END, 0, 0, NULL, NULL, 0, 0, 0, NULL, NULL, NULL}
};
-static int perf_init() {
+static bool perf_init() {
int cpu, group;
struct perf_event_attr perf_event_attr;
struct perf_event *current_event = NULL;
@@ -270,6 +270,8 @@ static int perf_init() {
memset(&perf_event_attr, 0, sizeof(perf_event_attr));
+ int enabled = 0;
+
for(cpu = 0; cpu < number_of_cpus; cpu++) {
for(current_event = &perf_events[0]; current_event->id != EV_ID_END; current_event++) {
if(unlikely(current_event->disabled)) continue;
@@ -304,6 +306,8 @@ static int perf_init() {
}
collector_error("Disabling event %u", current_event->id);
current_event->disabled = 1;
+ } else {
+ enabled++;
}
*(current_event->fd + cpu) = fd;
@@ -313,7 +317,7 @@ static int perf_init() {
}
}
- return 0;
+ return enabled > 0;
}
static void perf_free(void) {
@@ -1283,7 +1287,6 @@ void parse_command_line(int argc, char **argv) {
}
int main(int argc, char **argv) {
- clocks_init();
nd_log_initialize_for_external_plugins("perf.plugin");
parse_command_line(argc, argv);
@@ -1295,8 +1298,16 @@ int main(int argc, char **argv) {
else if(freq)
collector_error("update frequency %d seconds is too small for PERF. Using %d.", freq, update_every);
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_init()\n");
- int perf = !perf_init();
+ if (unlikely(debug))
+ fprintf(stderr, "perf.plugin: calling perf_init()\n");
+
+ if (!perf_init()) {
+ perf_free();
+ collector_info("all perf counters are disabled");
+ fprintf(stdout, "EXIT\n");
+ fflush(stdout);
+ exit(1);
+ }
// ------------------------------------------------------------------------
// the main loop
@@ -1306,27 +1317,30 @@ int main(int argc, char **argv) {
time_t started_t = now_monotonic_sec();
size_t iteration;
- usec_t step = update_every * USEC_PER_SEC;
+
+ int perf = 1;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, update_every * USEC_PER_SEC);
for(iteration = 0; 1; iteration++) {
- usec_t dt = heartbeat_next(&hb, step);
+ usec_t dt = heartbeat_next(&hb);
- if(unlikely(netdata_exit)) break;
+ if (unlikely(netdata_exit))
+ break;
- if(unlikely(debug && iteration))
- fprintf(stderr, "perf.plugin: iteration %zu, dt %"PRIu64" usec\n"
- , iteration
- , dt
- );
+ if (unlikely(debug && iteration))
+ fprintf(stderr, "perf.plugin: iteration %zu, dt %" PRIu64 " usec\n", iteration, dt);
if(likely(perf)) {
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_collect()\n");
+ if (unlikely(debug))
+ fprintf(stderr, "perf.plugin: calling perf_collect()\n");
+
perf = !perf_collect();
if(likely(perf)) {
- if(unlikely(debug)) fprintf(stderr, "perf.plugin: calling perf_send_metrics()\n");
+ if (unlikely(debug))
+ fprintf(stderr, "perf.plugin: calling perf_send_metrics()\n");
+
perf_send_metrics();
}
}
@@ -1334,7 +1348,8 @@ int main(int argc, char **argv) {
fflush(stdout);
// restart check (14400 seconds)
- if(now_monotonic_sec() - started_t > 14400) break;
+ if (now_monotonic_sec() - started_t > 14400)
+ break;
}
collector_info("process exiting");
diff --git a/src/collectors/plugins.d/README.md b/src/collectors/plugins.d/README.md
deleted file mode 100644
index 6b53dbed6..000000000
--- a/src/collectors/plugins.d/README.md
+++ /dev/null
@@ -1,875 +0,0 @@
-<!--
-title: "External plugins"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/plugins.d/README.md"
-sidebar_label: "External plugins"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Developers/External plugins"
--->
-
-# External plugins
-
-`plugins.d` is the Netdata internal plugin that collects metrics
-from external processes, thus allowing Netdata to use **external plugins**.
-
-## Provided External Plugins
-
-| plugin | language | O/S | description |
-|:------------------------------------------------------------------------------------------------------:|:--------:|:--------------:|:----------------------------------------------------------------------------------------------------------------------------------------|
-| [apps.plugin](/src/collectors/apps.plugin/README.md) | `C` | linux, freebsd | monitors the whole process tree on Linux and FreeBSD and breaks down system resource usage by **process**, **user** and **user group**. |
-| [charts.d.plugin](/src/collectors/charts.d.plugin/README.md) | `BASH` | all | a **plugin orchestrator** for data collection modules written in `BASH` v4+. |
-| [cups.plugin](/src/collectors/cups.plugin/README.md) | `C` | all | monitors **CUPS** |
-| [ebpf.plugin](/src/collectors/ebpf.plugin/README.md) | `C` | linux | monitors different metrics on environments using kernel internal functions. |
-| [go.d.plugin](/src/go/plugin/go.d/README.md) | `GO` | all | collects metrics from the system, applications, or third-party APIs. |
-| [ioping.plugin](/src/collectors/ioping.plugin/README.md) | `C` | all | measures disk latency. |
-| [freeipmi.plugin](/src/collectors/freeipmi.plugin/README.md) | `C` | linux | collects metrics from enterprise hardware sensors, on Linux servers. |
-| [nfacct.plugin](/src/collectors/nfacct.plugin/README.md) | `C` | linux | collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`. |
-| [xenstat.plugin](/src/collectors/xenstat.plugin/README.md) | `C` | linux | collects XenServer and XCP-ng metrics using `lxenstat`. |
-| [perf.plugin](/src/collectors/perf.plugin/README.md) | `C` | linux | collects CPU performance metrics using performance monitoring units (PMU). |
-| [python.d.plugin](/src/collectors/python.d.plugin/README.md) | `python` | all | a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported). |
-| [slabinfo.plugin](/src/collectors/slabinfo.plugin/README.md) | `C` | linux | collects kernel internal cache objects (SLAB) metrics. |
-
-Plugin orchestrators may also be described as **modular plugins**. They are modular since they accept custom made modules to be included. Writing modules for these plugins is easier than accessing the native Netdata API directly. You will find modules already available for each orchestrator under the directory of the particular modular plugin (e.g. under python.d.plugin for the python orchestrator).
-Each of these modular plugins has its own methods for defining modules. Please check the examples and their documentation.
-
-## Motivation
-
-This plugin allows Netdata to use **external plugins** for data collection:
-
-1. external data collection plugins may be written in any computer language.
-
-2. external data collection plugins may use O/S capabilities or `setuid` to
- run with escalated privileges (compared to the `netdata` daemon).
- The communication between the external plugin and Netdata is unidirectional
- (from the plugin to Netdata), so that Netdata cannot manipulate an external
- plugin running with escalated privileges.
-
-## Operation
-
-Each of the external plugins is expected to run forever.
-Netdata will start it when it starts and stop it when it exits.
-
-If an external plugin exits or crashes, Netdata will log an error.
-If an external plugin exits or crashes without having pushed metrics to Netdata, Netdata will not start it again.
-
-- Plugins that exit with any value other than zero will be disabled. Plugins that exit with zero will be restarted after some time.
-- Plugins may also be disabled by Netdata if they output things that Netdata does not understand.
-
-The `stdout` of external plugins is connected to Netdata to receive metrics,
-with the API defined below.
-
-The `stderr` of external plugins is connected to Netdata's `error.log`.
-
-Plugins can create any number of charts with any number of dimensions each. Each chart can have its own characteristics independently of the others generated by the same plugin. For example, one chart may have an update frequency of 1 second, another may have 5 seconds and a third may have 10 seconds.
-
-## Configuration
-
-Netdata will supply the environment variables `NETDATA_USER_CONFIG_DIR` (for user supplied) and `NETDATA_STOCK_CONFIG_DIR` (for Netdata supplied) configuration files to identify the directory where configuration files are stored. It is up to the plugin to read the configuration it needs.
-
-The `netdata.conf` section `[plugins]` section contains a list of all the plugins found at the system where Netdata runs, with a boolean setting to enable them or not.
-
-Example:
-
-```
-[plugins]
- # enable running new plugins = yes
- # check for new plugins every = 60
-
- # charts.d = yes
- # ioping = yes
- # python.d = yes
-```
-
-The setting `enable running new plugins` sets the default behavior for all external plugins. It can be
-overridden for distinct plugins by modifying the appropriate plugin value configuration to either `yes` or `no`.
-
-The setting `check for new plugins every` sets the interval between scans of the directory
-`/usr/libexec/netdata/plugins.d`. New plugins can be added any time, and Netdata will detect them in a timely manner.
-
-For each of the external plugins enabled, another `netdata.conf` section
-is created, in the form of `[plugin:NAME]`, where `NAME` is the name of the external plugin.
-This section allows controlling the update frequency of the plugin and providing
-additional command line arguments to it.
-
-For example, for `apps.plugin` the following section is available:
-
-```
-[plugin:apps]
- # update every = 1
- # command options =
-```
-
-- `update every` controls the granularity of the external plugin.
-- `command options` allows giving additional command line options to the plugin.
-
-Netdata will provide the environment variable `NETDATA_UPDATE_EVERY` to the external plugins, in seconds (the default is 1). This is the **minimum update frequency** for all charts. A plugin that updates values more frequently than this is just wasting resources.
-
-Netdata will call the plugin with just one command line parameter: the number of seconds the user requested this plugin to update its data (by default this is also 1).
-
-Other than the above, the plugin configuration is up to the plugin.
-
-Keep in mind, that the user may use Netdata configuration to overwrite chart and dimension parameters. This is transparent to the plugin.
-
-### Autoconfiguration
-
-Plugins should attempt to autoconfigure themselves when possible.
-
-For example, if your plugin wants to monitor `squid`, you can search for it on port `3128` or `8080`. If either succeeds, you can proceed. If both fail, you can output an error (on stderr) saying that you cannot find `squid` running and give instructions about the plugin configuration. Then you can stop (exit with a non-zero value), so that Netdata will not attempt to start the plugin again.
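-
-As a hedged sketch only (the probed ports, the loopback address and the plugin behavior below are illustrative assumptions, not part of the plugins.d specification), such an autodetection step could look like this in Python:
-
-```python
-#!/usr/bin/env python3
-# Hypothetical autodetection for a squid-monitoring plugin.
-# The exit behavior follows the guidance above; the probed ports
-# and addresses are illustrative assumptions.
-import socket
-import sys
-
-CANDIDATE_PORTS = (3128, 8080)  # typical squid ports (assumption)
-
-def find_squid():
-    for port in CANDIDATE_PORTS:
-        try:
-            with socket.create_connection(("127.0.0.1", port), timeout=1):
-                return port
-        except OSError:
-            continue
-    return None
-
-port = find_squid()
-if port is None:
-    # stderr is connected to Netdata's error.log
-    print("cannot find squid on ports 3128 or 8080; please configure the plugin", file=sys.stderr)
-    sys.exit(1)  # non-zero exit: Netdata will not restart the plugin
-
-print(f"found squid on port {port}", file=sys.stderr)
-# ... continue with CHART/DIMENSION definitions and the collection loop ...
-```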
-
-## External Plugins API
-
-Any program that can print a few values to its standard output can become a Netdata external plugin.
-
-Netdata parses lines starting with:
-
-- `CHART` - create or update a chart
-- `DIMENSION` - add or update a dimension to the chart just created
-- `VARIABLE` - define a variable (to be used in health calculations)
-- `CLABEL` - add a label to a chart
-- `CLABEL_COMMIT` - commit added labels to the chart
-- `FUNCTION` - define a function that can be called later to execute it
-- `BEGIN` - initialize data collection for a chart
-- `SET` - set the value of a dimension for the initialized chart
-- `END` - complete data collection for the initialized chart
-- `FLUSH` - ignore the last collected values
-- `DISABLE` - disable this plugin
-- `FUNCTION_PROGRESS` - report the progress of a function execution
-- `FUNCTION_RESULT_BEGIN` - to initiate the transmission of function results
-- `FUNCTION_RESULT_END` - to end the transmission of function result
-- `CONFIG` - to define dynamic configuration entities
-
-A single program can produce any number of charts with any number of dimensions each.
-
-Charts can be added at any time (not just at the beginning).
-
-Netdata may send the following commands to the plugin's `stdin`:
-
-- `FUNCTION` - to call a specific function, with all parameters inline
-- `FUNCTION_PAYLOAD` - to call a specific function, with a payload of parameters
-- `FUNCTION_PAYLOAD_END` - to end the payload of parameters
-- `FUNCTION_CANCEL` - to cancel a running function transaction - no response is required
-- `FUNCTION_PROGRESS` - to report that a user asked the progress of running function call - no response is required
-
-### Command line parameters
-
-The plugin **MUST** accept just **one** parameter: **the number of seconds it is
-expected to update the values for its charts**. The value passed by Netdata
-to the plugin is controlled via its configuration file (so there is no need
-for the plugin to handle this configuration option).
-
-The external plugin can overwrite the update frequency. For example, the server may
-request per second updates, but the plugin may ignore it and update its charts
-every 5 seconds.
-
-### Environment variables
-
-There are a few environment variables that are set by `netdata` and are
-available for the plugin to use.
-
-| variable | description |
-|:--------------------------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `NETDATA_USER_CONFIG_DIR` | The directory where all Netdata-related user configuration should be stored. If the plugin requires custom user configuration, this is the place the user has saved it (normally under `/etc/netdata`). |
-| `NETDATA_STOCK_CONFIG_DIR` | The directory where all Netdata-related stock configuration should be stored. If the plugin is shipped with configuration files, this is the place they can be found (normally under `/usr/lib/netdata/conf.d`). |
-| `NETDATA_PLUGINS_DIR` | The directory where all Netdata plugins are stored. |
-| `NETDATA_USER_PLUGINS_DIRS` | The list of directories where custom plugins are stored. |
-| `NETDATA_WEB_DIR` | The directory where the web files of Netdata are saved. |
-| `NETDATA_CACHE_DIR` | The directory where the cache files of Netdata are stored. Use this directory if the plugin requires a place to store data. A new directory should be created for the plugin for this purpose, inside this directory. |
-| `NETDATA_LOG_DIR` | The directory where the log files are stored. By default the `stderr` output of the plugin will be saved in the `error.log` file of Netdata. |
-| `NETDATA_HOST_PREFIX` | This is used in environments where system directories like `/sys` and `/proc` have to be accessed at a different path. |
-| `NETDATA_DEBUG_FLAGS` | This is a number (probably in hex starting with `0x`), that enables certain Netdata debugging features. Check **\[[Tracing Options]]** for more information. |
-| `NETDATA_UPDATE_EVERY` | The minimum number of seconds between chart refreshes. This is like the **internal clock** of Netdata (it is user configurable, defaulting to `1`). There is no meaning for a plugin to update its values more frequently than this number of seconds. |
-| `NETDATA_INVOCATION_ID` | A random UUID in compact form, representing the unique invocation identifier of Netdata. When running under systemd, Netdata uses the `INVOCATION_ID` set by systemd. |
-| `NETDATA_LOG_METHOD` | One of `syslog`, `journal`, `stderr` or `none`, indicating the preferred log method of external plugins. |
-| `NETDATA_LOG_FORMAT` | One of `journal`, `logfmt` or `json`, indicating the format of the logs. Plugins can use the Netdata `systemd-cat-native` command to log always in `journal` format, and have it automatically converted to the format expected by netdata. |
-| `NETDATA_LOG_LEVEL` | One of `emergency`, `alert`, `critical`, `error`, `warning`, `notice`, `info`, `debug`. Plugins are expected to log events with the given priority and the more important ones. |
-| `NETDATA_SYSLOG_FACILITY` | Set only when the `NETDATA_LOG_METHOD` is `syslog`. Possible values are `auth`, `authpriv`, `cron`, `daemon`, `ftp`, `kern`, `lpr`, `mail`, `news`, `syslog`, `user`, `uucp` and `local0` to `local7` |
-| `NETDATA_ERRORS_THROTTLE_PERIOD` | The log throttling period in seconds. |
-| `NETDATA_ERRORS_PER_PERIOD` | The allowed number of log events per period. |
-| `NETDATA_SYSTEMD_JOURNAL_PATH` | When `NETDATA_LOG_METHOD` is set to `journal`, this is the systemd-journald socket path to use. |
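-
-For example, a plugin could use these variables to locate its configuration file and a private cache directory. This is only a sketch; the file name `myplugin.conf`, the sub-directory `myplugin` and the fallback paths are assumptions:
-
-```python
-#!/usr/bin/env python3
-# Sketch: resolving paths from the environment variables Netdata
-# exports to external plugins. Names and fallbacks are hypothetical.
-import os
-
-user_config = os.environ.get("NETDATA_USER_CONFIG_DIR", "/etc/netdata")
-stock_config = os.environ.get("NETDATA_STOCK_CONFIG_DIR", "/usr/lib/netdata/conf.d")
-cache_dir = os.environ.get("NETDATA_CACHE_DIR", "/var/cache/netdata")
-update_every = int(os.environ.get("NETDATA_UPDATE_EVERY", "1"))
-
-# prefer the user configuration, fall back to the stock one
-config_path = os.path.join(user_config, "myplugin.conf")
-if not os.path.isfile(config_path):
-    config_path = os.path.join(stock_config, "myplugin.conf")
-
-# plugins should create their own sub-directory inside the cache directory
-my_cache = os.path.join(cache_dir, "myplugin")
-os.makedirs(my_cache, exist_ok=True)
-```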
-
-### The output of the plugin
-
-The plugin should output instructions for Netdata to its output (`stdout`). Since this uses pipes, please make sure you flush stdout after every iteration.
-
-#### DISABLE
-
-`DISABLE` will disable this plugin. This will prevent Netdata from restarting the plugin. You can also exit with the value `1` to have the same effect.
-
-#### HOST_DEFINE
-
-`HOST_DEFINE` defines a new (or updates an existing) virtual host.
-
-The template is:
-
-> HOST_DEFINE machine_guid hostname
-
-where:
-
-- `machine_guid`
-
- uniquely identifies the host, this is what will be needed to add charts to the host.
-
-- `hostname`
-
- is the hostname of the virtual host
-
-#### HOST_LABEL
-
-`HOST_LABEL` adds a key-value pair to the virtual host labels. It has to be given between `HOST_DEFINE` and `HOST_DEFINE_END`.
-
-The template is:
-
-> HOST_LABEL key value
-
-where:
-
-- `key`
-
- uniquely identifies the key of the label
-
-- `value`
-
- is the value associated with this key
-
-There are a few special keys that are used to define the system information of the monitored system:
-
-- `_cloud_provider_type`
-- `_cloud_instance_type`
-- `_cloud_instance_region`
-- `_os_name`
-- `_os_version`
-- `_kernel_version`
-- `_system_cores`
-- `_system_cpu_freq`
-- `_system_ram_total`
-- `_system_disk_space`
-- `_architecture`
-- `_virtualization`
-- `_container`
-- `_container_detection`
-- `_virt_detection`
-- `_is_k8s_node`
-- `_install_type`
-- `_prebuilt_arch`
-- `_prebuilt_dist`
-
-#### HOST_DEFINE_END
-
-`HOST_DEFINE_END` commits the host information, creating a new host entity, or updating an existing one with the same `machine_guid`.
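-
-For example (the GUID, hostname and label values below are illustrative), a plugin could define a virtual host, label it, and then switch data collection to it with `HOST` (described next):
-
-```
-HOST_DEFINE 11111111-2222-3333-4444-555555555555 my-virtual-host
-HOST_LABEL _os_name Linux
-HOST_LABEL location rack-42
-HOST_DEFINE_END
-HOST 11111111-2222-3333-4444-555555555555
-```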
-
-#### HOST
-
-`HOST` switches data collection between hosts.
-
-The template is:
-
-> HOST machine_guid
-
-where:
-
-- `machine_guid`
-
- is the UUID of the host to switch to. After this command, every other command following it is assumed to be associated with this host.
- Setting machine_guid to `localhost` switches data collection to the local host.
-
-#### CHART
-
-`CHART` defines a new chart.
-
-The template is:
-
-> CHART type.id name title units \[family \[context \[charttype \[priority \[update_every \[options \[plugin [module]]]]]]]]
-
- where:
-
-- `type.id`
-
- uniquely identifies the chart,
- this is what will be needed to add values to the chart
-
- the `type` part controls the menu the charts will appear in
-
-- `name`
-
- is the name that will be presented to the user instead of `id` in `type.id`. This means that only the `id` part of
-  `type.id` is changed. When a name has been given, the chart is indexed (and can be referred to) as both `type.id` and
- `type.name`. You can set name to `''`, or `null`, or `(null)` to disable it. If a chart with the same name already
- exists, a serial number is automatically attached to the name to avoid naming collisions.
-
-- `title`
-
- the text above the chart
-
-- `units`
-
- the label of the vertical axis of the chart,
- all dimensions added to a chart should have the same units
- of measurement
-
-- `family`
-
- is used to group charts together
- (for example all eth0 charts should say: eth0),
- if empty or missing, the `id` part of `type.id` will be used
-
- this controls the sub-menu on the dashboard
-
-- `context`
-
-  the context defines the template of the chart. For example, if multiple charts present the same information for a different family, they should have the same `context`
-
-  this is used for looking up rendering information for the chart (colors, sizes, informational texts) and also for applying alerts to it
-
-- `charttype`
-
-  one of `line`, `area` or `stacked`,
-  if empty or missing, `line` will be used
-
-- `priority`
-
- is the relative priority of the charts as rendered on the web page,
- lower numbers make the charts appear before the ones with higher numbers,
- if empty or missing, `1000` will be used
-
-- `update_every`
-
- overwrite the update frequency set by the server,
- if empty or missing, the user configured value will be used
-
-- `options`
-
-  a space separated list of options, enclosed in quotes. Four options are currently supported:
-
-  - `obsolete` to mark a chart as obsolete (Netdata will hide it and delete it after some time)
-  - `detail` to mark a chart as insignificant (dashboards may use this to make the chart smaller, or to visualize a less important chart appropriately)
-  - `store_first` to make Netdata store the first collected value, assuming there was an invisible previous value set to zero (this is used by statsd charts - if the first collected value of incremental dimensions is not zero based, unrealistic spikes will appear with this option set)
-  - `hidden` to perform all operations on a chart, but not offer it on dashboards (the chart will still be sent to external databases)
-
-  `CHART` options have been added in Netdata v1.7 and the `hidden` option was added in v1.10.
-
-- `plugin` and `module`
-
-  both are just names that are used to let the user identify the plugin and the module that generated the chart. If `plugin` is unset or empty, Netdata will automatically set the filename of the plugin that generated the chart. `module` has no default.
-
-#### DIMENSION
-
-`DIMENSION` defines a new dimension for the chart
-
-The template is:
-
-> DIMENSION id \[name \[algorithm \[multiplier \[divisor [options]]]]]
-
- where:
-
-- `id`
-
- the `id` of this dimension (it is a text value, not numeric),
- this will be needed later to add values to the dimension
-
-  We suggest avoiding the use of `.` in dimension ids. External databases expect metrics to be `.` separated, and people will get confused if a dimension id contains a dot.
-
-- `name`
-
- the name of the dimension as it will appear at the legend of the chart,
- if empty or missing the `id` will be used
-
-- `algorithm`
-
- one of:
-
- - `absolute`
-
-      the value is drawn as-is (interpolated to second boundary),
- if `algorithm` is empty, invalid or missing, `absolute` is used
-
- - `incremental`
-
- the value increases over time,
- the difference from the last value is presented in the chart,
- the server interpolates the value and calculates a per second figure
-
- - `percentage-of-absolute-row`
-
- the % of this value compared to the total of all dimensions
-
- - `percentage-of-incremental-row`
-
- the % of this value compared to the incremental total of
- all dimensions
-
-- `multiplier`
-
- an integer value to multiply the collected value,
- if empty or missing, `1` is used
-
-- `divisor`
-
- an integer value to divide the collected value,
- if empty or missing, `1` is used
-
-- `options`
-
- a space separated list of options, enclosed in quotes. Options supported: `obsolete` to mark a dimension as obsolete (Netdata will delete it after some time) and `hidden` to make this dimension hidden, it will take part in the calculations but will not be presented in the chart.
-
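-Putting `CHART` and `DIMENSION` together, a minimal chart with two dimensions could be defined like this (the ids, title and units are illustrative, not taken from any shipped collector):
-
-```
-CHART example.random '' 'A random number' 'number' random random line 90000 1
-DIMENSION random1 '' absolute 1 1
-DIMENSION random2 '' absolute 1 1
-```
-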
-#### VARIABLE
-
-> VARIABLE [SCOPE] name = value
-
-`VARIABLE` defines a variable that can be used in alerts. This is used for setting constants (like the maximum number of connections a server may accept).
-
-Variables support 2 scopes:
-
-- `GLOBAL` or `HOST` to define the variable at the host level.
-- `LOCAL` or `CHART` to define the variable at the chart level. Use chart-local variables when the same variable may exist for different charts (e.g. Netdata monitors 2 mysql servers, and you need to set the `max_connections` each server accepts). Chart-local variables are ideal for building alert templates.
-
-The position of the `VARIABLE` line sets its default scope (in case you do not specify a scope). So, defining a `VARIABLE` before any `CHART`, or between `END` and `BEGIN` (outside any chart), sets `GLOBAL` scope, while defining a `VARIABLE` just after a `CHART` or a `DIMENSION`, or within the `BEGIN` - `END` block of a chart, sets `LOCAL` scope.
-
-These variables can be set and updated at any point.
-
-Variable names should use alphanumeric characters, the `.` and the `_`.
-
-The `value` is floating point (Netdata uses `long double`).
-
-Variables are transferred to upstream Netdata servers (streaming and database replication).
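-
-For example (names and values are illustrative), a plugin could publish one host-level constant and one chart-local constant:
-
-```
-VARIABLE GLOBAL max_ups_load = 100
-CHART example.random '' 'A random number' 'number' random random line 90000 1
-VARIABLE LOCAL max_connections = 500
-DIMENSION random1 '' absolute 1 1
-```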
-
-#### CLABEL
-
-> CLABEL name value source
-
-`CLABEL` defines a label used to organize and identify a chart.
-
-Name and value accept characters according to the following table:
-
-| Character | Symbol | Label Name | Label Value |
-|---------------------|:------:|:----------:|:-----------:|
-| UTF-8 character | UTF-8 | _ | keep |
-| Lower case letter | [a-z] | keep | keep |
-| Upper case letter | [A-Z] | keep | [a-z] |
-| Digit | [0-9] | keep | keep |
-| Underscore | _ | keep | keep |
-| Minus | - | keep | keep |
-| Plus | + | _ | keep |
-| Colon | : | _ | keep |
-| Semicolon | ; | _ | : |
-| Equal | = | _ | : |
-| Period | . | keep | keep |
-| Comma | , | . | . |
-| Slash | / | keep | keep |
-| Backslash | \ | / | / |
-| At | @ | _ | keep |
-| Space | ' ' | _ | keep |
-| Opening parenthesis | ( | _ | keep |
-| Closing parenthesis | ) | _ | keep |
-| Anything else | | _ | _ |
-
-The `source` is an integer field that can have the following values:
-- `1`: The value was set automatically.
-- `2`: The value was set manually.
-- `4`: This is a Kubernetes label.
-- `8`: This is a label defined using `netdata` agent cloud link.
-
-#### CLABEL_COMMIT
-
-`CLABEL_COMMIT` indicates that all labels were defined and the chart can be updated.
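-
-For example (label names and values are illustrative), right after a `CHART` line a plugin could attach labels like this, using source `1` to mark them as automatically set:
-
-```
-CLABEL device sda 1
-CLABEL mount_point /home 1
-CLABEL_COMMIT
-```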
-
-#### FUNCTION
-
-The plugin can register functions to Netdata, like this:
-
-> FUNCTION [GLOBAL] "name and parameters of the function" timeout "help string for users" "tags" "access"
-
-- Tags currently recognized are either `top` or `logs` (or both, space separated).
-- Access is one of `any`, `member`, or `admin`:
- - `any` to offer the function to all users of Netdata, even if they are not authenticated.
- - `member` to offer the function to all authenticated members of Netdata.
- - `admin` to offer the function only to authenticated administrators.
-
-Users can use a function to ask for more information from the collector. Netdata maintains a registry of functions in 2 levels:
-
-- per node
-- per chart
-
-Both node and chart functions are exactly the same, but chart functions allow Netdata to relate functions with charts and therefore present a context-sensitive menu of functions related to the chart the user is using.
-
-Users can get a list of all the registered functions using the `/api/v1/functions` endpoint of Netdata and call functions using the `/api/v1/function` API call of Netdata.
-
-Once a function is called, the plugin will receive at its standard input a command that looks like this:
-
-```
-FUNCTION transaction_id timeout "name and parameters of the function as one quoted parameter" "user permissions value" "source of request"
-```
-
-When the function to be called is to receive a payload of parameters, the call looks like this:
-
-```
-FUNCTION_PAYLOAD transaction_id timeout "name and parameters of the function as one quoted parameter" "user permissions value" "source of request" "content/type"
-body of the payload, formatted according to content/type
-FUNCTION_PAYLOAD_END
-```
-
-In this case, Netdata will send:
-
-- A line starting with `FUNCTION_PAYLOAD` together with the required metadata for the function, like the transaction id, the function name and its parameters, the timeout and the content type. This line ends with a newline.
-- Then, the payload itself (which may or may not have newlines in it). The payload should be parsed according to the content type parameter.
-- Finally, a line starting with `FUNCTION_PAYLOAD_END`, so it is expected like `\nFUNCTION_PAYLOAD_END\n`.
-
-Note 1: The plugins.d protocol allows parameters without single or double quotes if they don't contain spaces. However, the plugin should be able to parse parameters even if they are enclosed in single or double quotes. If the first character of a parameter is a single quote, its last character should also be a single quote, and similarly for double quotes.
-
-Note 2: Netdata always sends the function and its parameters enclosed in double quotes. If the function command and its parameters contain quotes, they are converted to single quotes.
-
-The plugin is expected to parse and validate `name and parameters of the function as one quoted parameter`. Netdata allows the user interface to manipulate this string by appending more parameters.
-
-If the plugin rejects the request, it should respond with this:
-
-```
-FUNCTION_RESULT_BEGIN transaction_id 400 application/json
-{
- "status": 400,
- "error_message": "description of the rejection reasons"
-}
-FUNCTION_RESULT_END
-```
-
-If the plugin prepares a response, it should send (via its standard output, together with the collected data, but not interleaved with them):
-
-```
-FUNCTION_RESULT_BEGIN transaction_id http_response_code content_type expiration
-```
-
-Where:
-
- - `transaction_id` is the transaction id that Netdata sent for this function execution
- - `http_response_code` is the HTTP response code Netdata should respond with; 200 is the "ok" response
- - `content_type` is the content type of the response
- - `expiration` is the absolute timestamp (number, unix epoch) this response expires
-
-Immediately after this, all text is assumed to be the response content.
-The content is text and line oriented. The maximum line length accepted is 15kb. Longer lines will be truncated.
-The format of the content itself depends on the plugin and the UI.
-
-To terminate the message, Netdata seeks a line with just this:
-
-```
-FUNCTION_RESULT_END
-```
-
-This defines the end of the message. `FUNCTION_RESULT_END` should appear in a line alone, without any other text, so it is wise to add `\n` before and after it.
-
-After this line, Netdata resumes processing collected metrics from the plugin.
-
-The maximum uncompressed payload size Netdata will accept is 100MB.
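-
-As a hedged end-to-end illustration (the function name, transaction id, timestamps and JSON body are all made up, and the `#` lines are annotations for readability, not part of the protocol), a simple function exchange could look like this:
-
-```
-# at startup, the plugin registers the function (plugin -> Netdata)
-FUNCTION "top-items" 10 "Show the items currently being monitored" "top" "member"
-
-# later, Netdata calls it (Netdata -> plugin, on stdin)
-FUNCTION tr-12345 10 "top-items" "0x1" "method=api,user=admin"
-
-# the plugin responds (plugin -> Netdata, on stdout)
-FUNCTION_RESULT_BEGIN tr-12345 200 application/json 1700000000
-{
-  "status": 200,
-  "type": "table",
-  "data": []
-}
-FUNCTION_RESULT_END
-```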
-
-##### Functions cancellation
-
-Netdata is able to detect when a user made an API request, but abandoned it before it was completed. If this happens to an API called for a function served by the plugin, Netdata will generate a `FUNCTION_CANCEL` request to let the plugin know that it can stop processing the query.
-
-After receiving such a command, the plugin **must still send a response for the original function request**, to wake up any waiting threads before they time out. The HTTP response code is not important, since the response will be discarded; however, for auditing reasons we suggest sending back a 499 response code. This is not a standard response code according to the HTTP protocol, but web servers like `nginx` use it to indicate that a request was abandoned by a user.
-
-##### Functions progress
-
-When a request takes too long to be processed, Netdata allows the plugin to report progress to Netdata, which in turn will report progress to the caller.
-
-The plugin can send `FUNCTION_PROGRESS` like this:
-
-```
-FUNCTION_PROGRESS transaction_id done all
-```
-
-Where:
-
-- `transaction_id` is the transaction id of the function request
-- `done` is an integer value indicating the amount of work done
-- `all` is an integer value indicating the total amount of work to be done
-
-Netdata supports two kinds of progress:
-- progress as a percentage, which is calculated as `done * 100 / all`
-- progress without knowing the total amount of work to be done, which is enabled when the plugin reports `all` as zero.
-
-##### Functions timeout
-
-All function calls specify a timeout, at which all the intermediate routing nodes (parents, web server threads) will time out and abort the call.
-
-However, all intermediate routing nodes are configured to extend the timeout when the caller asks for progress. This works like this:
-
-When a progress request is received, if the expected timeout of the request is less than or equal to 10 seconds, the expected timeout is extended by 10 seconds.
-
-Usually, the user interface asks for a progress every second. So, during the last 10 seconds of the timeout, every progress request made shifts the timeout 10 seconds to the future.
-
-To accomplish this, when Netdata receives a progress request by a user, it generates progress requests to the plugin, updating all the intermediate nodes to extend their timeout if necessary.
-
-The plugin will receive progress requests like this:
-
-```
-FUNCTION_PROGRESS transaction_id
-```
-
-There is no need to respond to this command. It is only there to let the plugin know that a user is still waiting for the query to finish.
-
-#### CONFIG
-
-`CONFIG` commands sent from the plugin to Netdata define dynamic configuration entities. These configurable entities are exposed to the user interface, allowing users to change configuration at runtime.
-
-Configurations made this way are saved to disk by Netdata and are replayed automatically when Netdata or the plugin restarts.
-
-`CONFIG` commands look like this:
-
-```
-CONFIG id action ...
-```
-
-Where:
-
-- `id` is a unique identifier for the configurable entity. This should by design be unique across Netdata. It should be something like `plugin:module:jobs`, e.g. `go.d:postgresql:jobs:masterdb`. This is assumed to be colon-separated, with the last part (`masterdb` in our example) being the one displayed to users when there are no conflicts under the same configuration path.
-- `action` can be:
- - `create`, to declare the dynamic configuration entity
- - `delete`, to delete the dynamic configuration entity - this does not delete the user's configuration, so if an entity with the same id is created in the future, the saved configuration will be given to it.
- - `status`, to update the dynamic configuration entity status
-
-> IMPORTANT:<br/>
-> The plugin should blindly create, delete and update the status of its dynamic configuration entities, without any special logic applied to it. Netdata needs to be kept up to date about what is actually happening at the plugin. Keep in mind that creating dynamic configuration entities triggers responses from Netdata, depending on their type and status. Re-creating a job triggers the same responses every time, so make sure you create jobs only when they are actually added.
-
-When the `action` is `create`, the following additional parameters are expected:
-
-```
-CONFIG id action status type "path" source_type "source" "supported commands" "view permissions" "edit permissions"
-```
-
-Where:
-
-- `action` should be `create`
-- `status` can be:
- - `accepted`, the plugin accepted the configuration, but it is not running yet.
- - `running`, the plugin accepted and runs the configuration.
- - `failed`, the plugin tried to run the configuration, but it failed.
- - `incomplete`, the plugin needs additional settings to run this configuration. This is usually used when the plugin has discovered a job, but important information is missing for it to work.
- - `disabled`, the configuration has been disabled by a user.
- - `orphan`, the configuration is not claimed by any plugin. This is used internally by Netdata to mark saved configuration entities for which no related plugin is available. Do not use it in plugins.
-- `type` can be `single`, `template` or `job`:
- - `single` is used when the configurable entity is fixed and users should never be able to add or delete it.
- - `template` is used to define a template based on which users can add multiple configurations, like adding data collection jobs. So, the plugin defines the template of the jobs and users are presented with a `[+]` button to add such configuration jobs. The plugin can define multiple templates by giving different `id`s to them.
- - `job` is used to define a job of a template. The plugin should always add all its jobs, independently of the way they have been discovered. It is important to note the relation between `template` and `job` when it comes to the `id`: the `id` of the template should be the prefix of the `job`'s `id`. For example, if the template is `go.d:postgresql:jobs`, then all its jobs should be named like `go.d:postgresql:jobs:jobname`.
-- `path` is the absolute path of the configurable entity inside the tree of Netdata configurations. Usually, this should be `/collectors`.
-- `source_type` can be `internal`, `stock`, `user`, `discovered` or `dyncfg`:
- - `internal` is used for configurations that are based on internal code settings
- - `stock` is used for default configurations
- - `discovered` is used for dynamic configurations the plugin discovers by its own
- - `user` is used for user configurations, usually via a configuration file
- - `dyncfg` is used for configuration received via this dynamic configuration mechanism
-- `source` should provide more details about the exact source of the configuration, like `line@file`, or `user@ip`, etc.
-- `supported_commands` is a space separated list of the following keywords, enclosed in single or double quotes. These commands are used by the user interface to determine the actions the users can take:
- - `schema`, to expose the JSON schema for the user interface. This is mandatory for all configurable entities. When `schema` requests are received, Netdata will first attempt to load the schema from `/etc/netdata/schema.d/` and `/var/lib/netdata/conf.d/schema.d`. For jobs, it will serve the schema of their template. If no schema is found for the required `id`, the `schema` request will be forwarded to the plugin, which is expected to send back the relevant schema.
- - `get`, to expose the current configuration values, according to the schema defined. `templates` cannot support `get`, since they don't maintain any data.
- - `update`, to receive configuration updates for this entity. `templates` cannot support `update`, since they don't maintain any data.
- - `test`, like `update` but only test the configuration and report success or failure.
- - `add`, to receive job creation commands for templates. Only `templates` should support this command.
- - `remove`, to remove a configuration. Only `jobs` should support this command.
- - `enable` and `disable`, to receive user requests to enable and disable this entity. If only one of `enable` or `disable` is added to the supported commands, Netdata will add both of them. The plugin should expose these commands on `templates` only when it wants to receive `enable` and `disable` commands for all the `jobs` of this `template`.
- - `restart`, to restart a job.
-- `view permissions` and `edit permissions` are bitmaps of the Netdata permission system that control access to the configuration. If set to zero, Netdata will require a signed-in user with view and edit permissions to Netdata's configuration system.
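-
-Putting the above together, a plugin could declare a template and one job like this (the statuses, path, sources, supported commands and zero permissions are illustrative):
-
-```
-CONFIG go.d:postgresql:jobs create accepted template "/collectors" internal "go.d.plugin" "schema add" 0 0
-CONFIG go.d:postgresql:jobs:masterdb create running job "/collectors" discovered "discovered@localhost" "schema get update test remove enable restart" 0 0
-```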
-
-The plugin receives commands as if it had exposed a `FUNCTION` named `config`. Netdata formats all these calls like this:
-
-```
-config id command
-```
-
-Where `id` is the unique id of the configurable entity and `command` is one of the supported commands the plugin sent to Netdata.
-
-The plugin will receive (for commands: `schema`, `get`, `remove`, `enable`, `disable` and `restart`):
-
-```
-FUNCTION transaction_id timeout "config id command" "user permissions value" "source string"
-```
-
-or (for commands: `update`, `add` and `test`):
-
-```
-FUNCTION_PAYLOAD transaction_id timeout "config id command" "user permissions value" "source string" "content/type"
-body of the payload formatted according to content/type
-FUNCTION_PAYLOAD_END
-```
-
-Once received, the plugin should process it and respond accordingly.
-
-Immediately after the plugin adds a configuration entity, if the commands `enable` and `disable` are supported by it, Netdata will send either `enable` or `disable` for it, based on the last user action, which has been persisted to disk.
-
-Plugin responses follow the same format as `FUNCTION` responses:
-
-```
-FUNCTION_RESULT_BEGIN transaction_id http_response_code content/type expiration
-body of the response formatted according to content/type
-FUNCTION_RESULT_END
-```
-
-Successful responses (HTTP response code 200) to `schema` and `get` should send back the relevant JSON object.
-All other responses should have the following response body:
-
-```json
-{
- "status" : 404,
- "message" : "some text"
-}
-```
-
-The user interface presents the message to users, even when the response is successful (HTTP code 200).
-
-When responding to additions and updates, Netdata uses the following success response codes to derive additional information:
-
-- `200`, the configuration has been accepted and it is running.
-- `202`, the configuration has been accepted but it is not yet running. A subsequent `status` action will update it.
-- `298`, the configuration has been accepted but it is disabled for some reason (probably because it matches nothing, or its contents are not useful - use the `message` to provide additional information).
-- `299`, the configuration has been accepted but a restart is required to apply it.
-
-## Data collection
-
-Data collection is defined as a series of `BEGIN` -> `SET` -> `END` lines.
-
-> BEGIN type.id [microseconds]
-
-- `type.id`
-
- is the unique identification of the chart (as given in `CHART`)
-
-- `microseconds`
-
- is the number of microseconds since the last update of the chart. It is optional.
-
- Under heavy system load, the system may have some latency transferring
- data from the plugins to Netdata via the pipe. This number improves
- accuracy significantly, since the plugin is able to calculate the
- duration between its iterations better than Netdata.
-
- The first time the plugin is started, no microseconds should be given
- to Netdata.
-
-> SET id = value
-
-- `id`
-
- is the unique identification of the dimension (of the chart just began)
-
-- `value`
-
-  is the collected value; only integer values are collected. If you want to push fractional values, multiply the value by 100 or 1000 and set the `DIMENSION` divisor to the same factor.
-
-> END
-
- `END` does not take any parameters; it commits the collected values for all dimensions to the chart. If a dimension was not `SET`, its value will be empty for this commit.
-
-More `SET` lines may appear to update all the dimensions of the chart,
-all of them in one `BEGIN` -> `END` block.
-
-All `SET` lines within a single `BEGIN` -> `END` block have to refer to the
-same chart.
-
-If more charts need to be updated, each chart should have its own
-`BEGIN` -> `SET` -> `END` block.
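-
-For example, one collection iteration for the two-dimension chart defined earlier could look like this (the values and the microseconds figure are illustrative):
-
-```
-BEGIN example.random 1000012
-SET random1 = 47
-SET random2 = 83
-END
-```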
-
-If, for any reason, a plugin has issued a `BEGIN` but wants to cancel it,
-it can issue a `FLUSH`. The `FLUSH` command will instruct Netdata to ignore
-all the values collected since the last `BEGIN` command.
-
-If a plugin does not behave properly (outputs invalid lines, or does not
-follow these guidelines), it will be disabled by Netdata.
-
-### collected values
-
-Netdata will collect any **signed** value in the 64bit range:
-`-9,223,372,036,854,775,808` to `+9,223,372,036,854,775,807`
-
-If a value is not collected, leave it empty, like this:
-
-`SET id =`
-
-or do not output the line at all.
-
-## Modular Plugins
-
-1. **python**, use `python.d.plugin`, there are many examples in the [python.d
- directory](/src/collectors/python.d.plugin/README.md)
-
-   Python is ideal for Netdata plugins. It is a simple, yet powerful way to collect data, and it has a very small memory footprint, although it is not the most CPU-efficient option.
-
-2. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d
- directory](/src/collectors/charts.d.plugin/README.md)
-
-   BASH is the simplest scripting language for collecting values. It is, however, the least efficient in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might consume a lot of system resources.
-
-3. **C**
-
- Of course, C is the most efficient way of collecting data. This is why Netdata itself is written in C.
-
-## Writing Plugins Properly
-
-There are a few rules for writing plugins properly:
-
-1. Respect system resources
-
- Pay special attention to efficiency:
-
-    - Initialize everything once, at the beginning. Initialization cost is paid only once: your plugin will most probably be started once and run forever. So, do whatever heavy operation is needed at the beginning, just once.
- - Do the absolutely minimum while iterating to collect values repeatedly.
- - If you need to connect to another server to collect values, avoid re-connects if possible. Connect just once, with keep-alive (for HTTP) enabled and collect values using the same connection.
- - Avoid any CPU or memory heavy operation while collecting data. If you control memory allocation, avoid any memory allocation while iterating to collect values.
-    - Avoid running external commands when possible. If you are writing shell scripts, especially avoid pipes (each pipe is another fork, a very expensive operation).
-
-2. The best way to iterate at a constant pace is this pseudo code:
-
-```js
- var update_every = argv[1] * 1000; /* seconds * 1000 = milliseconds */
-
- readConfiguration();
-
- if(!verifyWeCanCollectValues()) {
- print("DISABLE");
- exit(1);
- }
-
- createCharts(); /* print CHART and DIMENSION statements */
-
- var loops = 0;
- var last_run = 0;
- var next_run = 0;
- var dt_since_last_run = 0;
- var now = 0;
-
- while(true) {
- /* find the current time in milliseconds */
- now = currentTimeStampInMilliseconds();
-
- /*
- * find the time of the next loop
- * this makes sure we are always aligned
- * with the Netdata daemon
- */
- next_run = now - (now % update_every) + update_every;
-
- /*
- * wait until it is time
- * it is important to do it in a loop
- * since many wait functions can be interrupted
- */
- while( now < next_run ) {
- sleepMilliseconds(next_run - now);
- now = currentTimeStampInMilliseconds();
- }
-
- /* calculate the time passed since the last run */
- if ( loops > 0 )
- dt_since_last_run = (now - last_run) * 1000; /* in microseconds */
-
- /* prepare for the next loop */
- last_run = now;
- loops++;
-
- /* do your magic here to collect values */
- collectValues();
-
- /* send the collected data to Netdata */
- printValues(dt_since_last_run); /* print BEGIN, SET, END statements */
- }
-```
-
- Using the above procedure, your plugin will be synchronized to start data collection on steps of `update_every`. There will be no need to keep track of latencies in data collection.
-
- Netdata interpolates values to second boundaries, so even if your plugin is not perfectly aligned it does not matter. Netdata will find out. When your plugin works in increments of `update_every`, there will be no gaps in the charts due to the possible cumulative micro-delays in data collection. Gaps will only appear if the data collection is really delayed.
-
-3. If you are not sure about memory leaks, exit once every hour. Netdata will restart your process.
-
-4. If possible, try to autodetect if your plugin should be enabled, without any configuration.
-
-
diff --git a/src/collectors/plugins.d/functions-table.md b/src/collectors/plugins.d/functions-table.md
deleted file mode 100644
index f3a8bcf36..000000000
--- a/src/collectors/plugins.d/functions-table.md
+++ /dev/null
@@ -1,418 +0,0 @@
-
-> This document is a work in progress.
-
-Plugin functions can support any kind of response. However, the UI of Netdata has defined some structures as responses it can parse, understand and visualize.
-
-One of these responses is the `table`. This is used in almost all functions implemented today.
-
-# Functions Tables
-
-Tables are defined when `"type": "table"` is set. The following is the standard header that should be available on all `table` responses:
-
-```json
-{
- "type": "table",
- "status": 200,
- "update_every": 1,
- "help": "help text",
- "hostname": "the hostname of the server sending this response, to appear at the title of the UI.",
- "expires": "UNIX epoch timestamp that the response expires",
- "has_history": "boolean: true when the datetime picker plays a role in the result set",
- // rest of the response
-}
-```
-
-## Preflight `info` request
-
-Before making the first call to a function, the UI does a preflight request to understand what the function supports. The plugin receives this request as a `FUNCTION` call specifying the `info` parameter (possibly among others).
-
-The response from the plugin is expected to have the following:
-
-```json
-{
- // standard table header - as above
- "accepted_params": [ "a", "b", "c", ...],
- "required_params": [
- {
- "id": "the keyword to use when sending / receiving this parameter",
- "name": "the name to present to users for this parameter",
- "help": "a help string to help users understand this parameter",
- "type": "the type of the parameter, either: 'select' or 'multiselect'",
- "options": [
- {
- "id": "the keyword to use when sending / receiving this option",
- "name": "the name to present to users for this option",
- "pill": "a short text to show next to this option as a pill",
- "info": "a longer text to show on a tooltip when the user is hovering this option"
- },
- // more options for this required parameter
- ]
- },
- // more required parameters
- ]
-}
-```
-
-If there are no required parameters, `required_params` can be omitted.
-If there are no accepted parameters, `accepted_params` can be omitted. `accepted_params` can be sent during normal responses to update the UI with a new set of available parameters between calls.
-
-For `logs`, the UI requires this set of `accepted_params`.
-
-Ref [Pagination](#pagination), [Deltas](#incremental-responses)
-```json
-[
- "info", // boolean: requests the preflight `info` request
- "after", // interval start timestamp
- "before", // interval end timestamp
- "direction", // sort direction [backward,forward]
- "last", // number of records to retrieve
- "anchor", // timestamp to divide records in pages
- "facets",
- "histogram", // selects facet to be used on the histogram
- "if_modified_since", // used in PLAY mode, to indicate that the UI wants data newer than the specified timestamp
- "data_only", // boolean: requests data (logs) only
- "delta", // boolean: requests incremental responses
- "tail",
- "sampling",
- "slice"
-]
-```
-
-If there are `required_params`, the UI by default selects the first option. [](VERIFY_WITH_UI)
-
-## Table data
-
-To define table data, the UI expects this:
-
-```json
-{
- // header
- "columns": {
- "id": {
- "index": "number: the sort order for the columns, lower numbers are first",
- "name": "string: the name of the column as it should be presented to users",
- "unique_key": "boolean: true when the column uniquely identifies the row",
- "visible": "boolean: true when the column should be visible by default",
- "type": "enum: see column types",
- "units": "string: the units of the value, if any - this item can be omitted if the column does not have units [](VERIFY_WITH_UI)",
- "visualization": "enum: see visualization types",
- "value_options": {
- "units": "string: the units of the value [](VERIFY_WITH_UI)",
- "transform": "enum: see transformation types",
- "decimal_points": "number: the number of fractional digits for the number",
- "default_value": "whatever the value is: when the value is null, show this instead"
- },
- "max": "number: when the column is numeric, this is the max value the data have - this is used when range filtering is set and value bars",
- "pointer_to": "id of another field: this is used when detail-string is set, to point to the column this column is detail of",
- "sort": "enum: sorting order",
- "sortable": "boolean: whether the column is sortable by users",
- "sticky": "boolean: whether the column should always be visible in the UI",
- "summary": "string: ???",
- "filter": "enum: the filtering type for this column",
- "full_width": "boolean: the value is expected to get most of the available column space. When multiple columns are full_width, the available space is given to all of them.",
- "wrap": "boolean: true when the entire value should be shown, even when it occupies a big space.",
- "default_expanded_filter": "boolean: true when the filter of this column should be expanded by default.",
- "dummy": "boolean: when set to true, the column is not to be presented to users."
- },
- // more IDs
- },
- "data": [ // array of rows
- [ // array of columns
- // values for each column linked to their "index" in the columns
- ],
- // next row
- ],
- "default_sort_column": "id: the id of the column that should be sorted by default"
-}
-```
-
-**IMPORTANT**
-
-In the data values, the `timestamp` column value must be in UNIX epoch microseconds.
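-
-As a hedged, minimal example of a complete `table` response (the column ids, values and hostname are made up, and many optional fields are omitted):
-
-```json
-{
-  "type": "table",
-  "status": 200,
-  "update_every": 1,
-  "help": "processes consuming the most CPU",
-  "hostname": "server1",
-  "expires": 1700000010,
-  "has_history": false,
-  "columns": {
-    "name": { "index": 0, "name": "Process", "unique_key": true, "visible": true, "type": "string", "sortable": true, "filter": "multiselect" },
-    "cpu": { "index": 1, "name": "CPU", "visible": true, "type": "bar-with-integer", "units": "%", "visualization": "bar", "sort": "descending", "sortable": true, "filter": "range" }
-  },
-  "data": [
-    [ "netdata", 1.5 ],
-    [ "postgres", 0.7 ]
-  ],
-  "default_sort_column": "cpu"
-}
-```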
-
-
-### Sorting order
-
-- `ascending`
-- `descending`
-
-### Transformation types
-
-- `none`, just show the value, without any processing
-- `number`, just show a number with its units, respecting `decimal_points`
-- `duration`, makes the UI show a human readable duration of the seconds given
-- `datetime`, makes the UI show a human readable datetime of the timestamp in UNIX epoch seconds
-- `datetime_usec`, makes the UI show a human readable datetime of the timestamp in UNIX epoch microseconds
-
-### Visualization types
-
-- `value`
-- `bar`
-- `pill`
-- `richValue`, this is not used yet, it is supposed to be a structure that will provide a value and options for it
-- `rowOptions`, defines options for the entire row - this column is hidden from the UI
-
-### rowOptions
-
-TBD
-
-### Column types
-
-- `none`
-- `integer`
-- `boolean`
-- `string`
-- `detail-string`
-- `bar-with-integer`
-- `duration`
-- `timestamp`
-- `array`
-
-### Filter types
-
-- `none`, this facet is not selectable by users
-- `multiselect`, the user can select any number of the available options
-- `facet`, similar to `multiselect`, but it also indicates that the column has been indexed and has values with counters. Columns set to `facet` must appear in the `facets` list.
-- `range`, the user can select a range of values (numeric)
-
-The plugin may send non visible columns with filter type `facet`. This means that the plugin can enable indexing on these columns, but it has not done it. Then the UI may send `facets:{ID1},{ID2},{ID3},...` to enable indexing of the columns specified.
-
-What is the default?
-
-#### Facets
-
-Facets are a special case of `multiselect` fields. They are used to provide additional information about each possible value, including their relative sort order and the number of times each value appears in the result set. Facets are filters handled by the plugin. So, the plugin will receive user selected filters like: `{KEY}:{VALUE1},{VALUE2},...`, where `{KEY}` is the id of the column and `{VALUEX}` is the id of the facet option the user selected.
-
-```json
-{
- // header,
- "columns": ...,
- "data": ...,
- "facets": [
- {
- "id": "string: the unique id of the facet",
- "name": "string: the human readable name of the facet",
- "order": "integer: the sorting order of this facet - lower numbers move items above others"
- "options": [
- {
- "id": "string: the unique id of the facet value",
- "name": "string: the human readable version of the facet value",
- "count": "integer: the number of times this value appears in the result set",
- "order": "integer: the sorting order of this facet value - lower numbers move items above others"
- },
- // next option
- ],
- },
- // next facet
- ]
-}
-```
-
-## Charts
-
-```json
-{
- // header,
- "charts": {
-
- },
- "default_charts": [
-
- ]
-}
-```
-
-
-## Histogram
-
-```json
-{
- "available_histograms": [
- {
- "id": "string: the unique id of the histogram",
- "name": "string: the human readable name of the histogram",
- "order": "integer: the sorting order of available histograms - lower numbers move items above others"
- }
- ],
- "histogram": {
- "id": "string: the unique id of the histogram",
- "name": "string: the human readable name of the histogram",
- "chart": {
- "summary": {
- "nodes": [
- {
- "mg": "string",
- "nm": "string: node name",
- "ni": "integer: node index"
- }
- ],
- "contexts": [
- {
- "id": "string: context id"
- }
- ],
- "instances": [
- {
- "id": "string: instance id",
- "ni": "integer: instance index"
- }
- ],
- "dimensions": [
- {
- "id": "string: dimension id",
- "pri": "integer",
- "sts": {
- "min": "float: dimension min value",
- "max": "float: dimension max value",
- "avg": "float: dimension avarage value",
- "arp": "float",
- "con": "float"
- }
- }
- ]
- },
- "result": {
- "labels": [
- // histogram labels
- ],
- "point": {
- "value": "integer",
- "arp": "integer",
- "pa": "integer"
- },
- "data": [
- [
- "timestamp" // unix milli
- // one array per label
- [
- // values
- ],
- ]
- ]
- },
- "view": {
- "title": "string: histogram tittle",
- "update_every": "integer",
- "after": "timestamp: histogram window start",
- "before": "timestamp: histogram window end",
- "units": "string: histogram units",
- "chart_type": "string: histogram chart type",
- "min": "integer: histogram min value",
- "max": "integer: histogram max value",
- "dimensions": {
- "grouped_by": [
- // "string: histogram grouped by",
- ],
- "ids": [
- // "string: histogram label id",
- ],
- "names": [
- // "string: histogram human readable label name",
- ],
- "colors": [],
- "units": [
- // "string: histogram label unit",
- ],
- "sts": {
- "min": [
- // "float: label min value",
- ],
- "max": [
- // "float: label max value",
- ],
- "avg": [
- // "float: label avarage value",
- ],
- "arp": [
- // "float",
- ],
- "con": [
- // "float",
- ]
- }
- }
- },
- "totals": {
- "nodes": {
- "sl": "integer",
- "qr": "integer"
- },
- "contexts": {
- "sl": "integer",
- "qr": "integer"
- },
- "instances": {
- "sl": "integer",
- "qr": "integer"
- },
- "dimensions": {
- "sl": "integer",
- "qr": "integer"
- }
- },
- "db": {
- "update_every": "integer"
- }
- }
- }
-}
-```
-
-**IMPORTANT**
-
-In the result data, `timestamps` must be in UNIX epoch milliseconds.
-
-## Grouping
-
-```json
-{
- // header,
- "group_by": {
-
- }
-}
-```
-
-## Datetime picker
-
-When `has_history: true`, the plugin must accept `after:TIMESTAMP_IN_SECONDS` and `before:TIMESTAMP_IN_SECONDS` parameters.
-The plugin can also turn pagination on, so that only a small set of the data are sent to the UI at a time.
-
-
-## Pagination
-
-The UI supports paginating results when `has_history: true`. So, when the result depends on the datetime picker and it is too big to be sent to the UI in one response, the plugin can enable datetime pagination like this:
-
-```json
-{
- // header,
- "columns": ...,
- "data": ...,
- "has_history": true,
- "pagination": {
- "enabled": "boolean: true to enable it",
- "column": "string: the column id that is used for pagination",
- "key": "string: the accepted_param that is used as the pagination anchor",
- "units": "enum: a transformation of the datetime picker to make it compatible with the anchor: timestamp, timestamp_usec"
- }
-}
-```
-
-Once pagination is enabled, the plugin must support the following parameters:
-
-- `{ANCHOR}:{VALUE}`, `{ANCHOR}` is the `pagination.key`, `{VALUE}` is the point the user wants to see entries at, formatted according to `pagination.units`.
-- `direction:backward` or `direction:forward` to specify whether the data to be returned are before or after the anchor.
-- `last:NUMBER`, the number of entries the plugin should return in the table data.
-- `query:STRING`, the full text search string the user wants to search for.
-- `if_modified_since:TIMESTAMP_USEC` and `tail:true`, used in PLAY mode, to indicate that the UI wants data newer than the specified timestamp. If there are no new data, the plugin must respond with 304 (Not Modified).
-
-### Incremental Responses
-
-- `delta:true` or `delta:false`, when the plugin supports incremental queries, it can accept the parameter `delta`. When set to true, the response of the plugin will be "added" to the previous response already available. This is used in combination with `if_modified_since` to optimize the amount of work the plugin has to do to respond.
-
-
-### Other
-
-- `slice:BOOLEAN` [](VERIFY_WITH_UI)
-- `sampling:NUMBER`
-
diff --git a/src/collectors/plugins.d/gperf-config.txt b/src/collectors/plugins.d/gperf-config.txt
deleted file mode 100644
index 721b771b7..000000000
--- a/src/collectors/plugins.d/gperf-config.txt
+++ /dev/null
@@ -1,112 +0,0 @@
-%{
-
-#define PLUGINSD_KEYWORD_ID_FLUSH 97
-#define PLUGINSD_KEYWORD_ID_DISABLE 98
-#define PLUGINSD_KEYWORD_ID_EXIT 99
-#define PLUGINSD_KEYWORD_ID_HOST 71
-#define PLUGINSD_KEYWORD_ID_HOST_DEFINE 72
-#define PLUGINSD_KEYWORD_ID_HOST_DEFINE_END 73
-#define PLUGINSD_KEYWORD_ID_HOST_LABEL 74
-
-#define PLUGINSD_KEYWORD_ID_BEGIN 12
-#define PLUGINSD_KEYWORD_ID_CHART 32
-#define PLUGINSD_KEYWORD_ID_CLABEL 34
-#define PLUGINSD_KEYWORD_ID_CLABEL_COMMIT 35
-#define PLUGINSD_KEYWORD_ID_DIMENSION 31
-#define PLUGINSD_KEYWORD_ID_END 13
-#define PLUGINSD_KEYWORD_ID_FUNCTION 41
-#define PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN 42
-#define PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS 43
-#define PLUGINSD_KEYWORD_ID_LABEL 51
-#define PLUGINSD_KEYWORD_ID_OVERWRITE 52
-#define PLUGINSD_KEYWORD_ID_SET 11
-#define PLUGINSD_KEYWORD_ID_VARIABLE 53
-#define PLUGINSD_KEYWORD_ID_CONFIG 100
-
-#define PLUGINSD_KEYWORD_ID_CLAIMED_ID 61
-#define PLUGINSD_KEYWORD_ID_BEGIN2 2
-#define PLUGINSD_KEYWORD_ID_SET2 1
-#define PLUGINSD_KEYWORD_ID_END2 3
-
-#define PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END 33
-#define PLUGINSD_KEYWORD_ID_RBEGIN 22
-#define PLUGINSD_KEYWORD_ID_RDSTATE 23
-#define PLUGINSD_KEYWORD_ID_REND 25
-#define PLUGINSD_KEYWORD_ID_RSET 21
-#define PLUGINSD_KEYWORD_ID_RSSTATE 24
-
-#define PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE 901
-#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE 902
-#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB 903
-#define PLUGINSD_KEYWORD_ID_DYNCFG_RESET 904
-#define PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS 905
-#define PLUGINSD_KEYWORD_ID_DELETE_JOB 906
-
-%}
-
-%struct-type
-%omit-struct-type
-%define hash-function-name gperf_keyword_hash_function
-%define lookup-function-name gperf_lookup_keyword
-%define word-array-name gperf_keywords
-%define constants-prefix GPERF_PARSER_
-%define slot-name keyword
-%define initializer-suffix ,0,PARSER_INIT_PLUGINSD,0
-%global-table
-%readonly-tables
-%null-strings
-PARSER_KEYWORD;
-
-%%
-#
-# Plugins Only Keywords
-#
-FLUSH, PLUGINSD_KEYWORD_ID_FLUSH, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1
-DISABLE, PLUGINSD_KEYWORD_ID_DISABLE, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2
-EXIT, PLUGINSD_KEYWORD_ID_EXIT, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3
-HOST, PLUGINSD_KEYWORD_ID_HOST, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4
-HOST_DEFINE, PLUGINSD_KEYWORD_ID_HOST_DEFINE, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5
-HOST_DEFINE_END, PLUGINSD_KEYWORD_ID_HOST_DEFINE_END, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6
-HOST_LABEL, PLUGINSD_KEYWORD_ID_HOST_LABEL, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7
-#
-# Common keywords
-#
-BEGIN, PLUGINSD_KEYWORD_ID_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8
-CHART, PLUGINSD_KEYWORD_ID_CHART, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9
-CLABEL, PLUGINSD_KEYWORD_ID_CLABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10
-CLABEL_COMMIT, PLUGINSD_KEYWORD_ID_CLABEL_COMMIT, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11
-DIMENSION, PLUGINSD_KEYWORD_ID_DIMENSION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12
-END, PLUGINSD_KEYWORD_ID_END, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13
-FUNCTION, PLUGINSD_KEYWORD_ID_FUNCTION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14
-FUNCTION_RESULT_BEGIN, PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15
-FUNCTION_PROGRESS, PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 16
-LABEL, PLUGINSD_KEYWORD_ID_LABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17
-OVERWRITE, PLUGINSD_KEYWORD_ID_OVERWRITE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 18
-SET, PLUGINSD_KEYWORD_ID_SET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19
-VARIABLE, PLUGINSD_KEYWORD_ID_VARIABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 20
-CONFIG, PLUGINSD_KEYWORD_ID_CONFIG, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 21
-#
-# Streaming only keywords
-#
-CLAIMED_ID, PLUGINSD_KEYWORD_ID_CLAIMED_ID, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 22
-BEGIN2, PLUGINSD_KEYWORD_ID_BEGIN2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23
-SET2, PLUGINSD_KEYWORD_ID_SET2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24
-END2, PLUGINSD_KEYWORD_ID_END2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25
-#
-# Streaming Replication keywords
-#
-CHART_DEFINITION_END, PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26
-RBEGIN, PLUGINSD_KEYWORD_ID_RBEGIN, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27
-RDSTATE, PLUGINSD_KEYWORD_ID_RDSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28
-REND, PLUGINSD_KEYWORD_ID_REND, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29
-RSET, PLUGINSD_KEYWORD_ID_RSET, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30
-RSSTATE, PLUGINSD_KEYWORD_ID_RSSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31
-#
-# obsolete - do nothing commands
-#
-DYNCFG_ENABLE, PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32
-DYNCFG_REGISTER_MODULE, PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33
-DYNCFG_REGISTER_JOB, PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34
-DYNCFG_RESET, PLUGINSD_KEYWORD_ID_DYNCFG_RESET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35
-REPORT_JOB_STATUS, PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36
-DELETE_JOB, PLUGINSD_KEYWORD_ID_DELETE_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 37
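The table above is the complete keyword vocabulary of the plugins.d text protocol, grouped by repertoire (plugins-only, common, streaming, replication, obsolete). As a rough, illustrative sketch of how the common keywords are used by an external collector, a minimal plugin could emit the following over stdout; the chart id, dimension name and argument layout follow the plugins.d external-plugin documentation rather than anything in this patch, so treat them as assumptions:

    #include <stdio.h>
    #include <unistd.h>

    /* Minimal, hypothetical plugins.d emitter: one chart, one dimension. */
    int main(void) {
        printf("CHART example.random '' 'Random number' 'value'\n");
        printf("DIMENSION x '' absolute\n");

        for (int i = 0; ; i++) {
            printf("BEGIN example.random\n");
            printf("SET x = %d\n", (i * 37) % 100);   /* made-up sample value */
            printf("END\n");
            fflush(stdout);   /* the agent reads one complete line at a time */
            sleep(1);         /* matches an update_every of 1 second */
        }
        return 0;
    }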
diff --git a/src/collectors/plugins.d/gperf-hashtable.h b/src/collectors/plugins.d/gperf-hashtable.h
deleted file mode 100644
index 315e2f7c7..000000000
--- a/src/collectors/plugins.d/gperf-hashtable.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/* ANSI-C code produced by gperf version 3.1 */
-/* Command-line: gperf --multiple-iterations=1000 --output-file=gperf-hashtable.h gperf-config.txt */
-/* Computed positions: -k'1-2' */
-
-#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
- && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \
- && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \
- && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \
- && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \
- && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \
- && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \
- && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \
- && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \
- && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \
- && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \
- && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \
- && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \
- && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \
- && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \
- && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \
- && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \
- && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \
- && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \
- && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \
- && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \
- && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \
- && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126))
-/* The character set is not based on ISO-646. */
-#error "gperf generated tables don't work with this execution character set. Please report a bug to <bug-gperf@gnu.org>."
-#endif
-
-#line 1 "gperf-config.txt"
-
-
-#define PLUGINSD_KEYWORD_ID_FLUSH 97
-#define PLUGINSD_KEYWORD_ID_DISABLE 98
-#define PLUGINSD_KEYWORD_ID_EXIT 99
-#define PLUGINSD_KEYWORD_ID_HOST 71
-#define PLUGINSD_KEYWORD_ID_HOST_DEFINE 72
-#define PLUGINSD_KEYWORD_ID_HOST_DEFINE_END 73
-#define PLUGINSD_KEYWORD_ID_HOST_LABEL 74
-
-#define PLUGINSD_KEYWORD_ID_BEGIN 12
-#define PLUGINSD_KEYWORD_ID_CHART 32
-#define PLUGINSD_KEYWORD_ID_CLABEL 34
-#define PLUGINSD_KEYWORD_ID_CLABEL_COMMIT 35
-#define PLUGINSD_KEYWORD_ID_DIMENSION 31
-#define PLUGINSD_KEYWORD_ID_END 13
-#define PLUGINSD_KEYWORD_ID_FUNCTION 41
-#define PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN 42
-#define PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS 43
-#define PLUGINSD_KEYWORD_ID_LABEL 51
-#define PLUGINSD_KEYWORD_ID_OVERWRITE 52
-#define PLUGINSD_KEYWORD_ID_SET 11
-#define PLUGINSD_KEYWORD_ID_VARIABLE 53
-#define PLUGINSD_KEYWORD_ID_CONFIG 100
-
-#define PLUGINSD_KEYWORD_ID_CLAIMED_ID 61
-#define PLUGINSD_KEYWORD_ID_BEGIN2 2
-#define PLUGINSD_KEYWORD_ID_SET2 1
-#define PLUGINSD_KEYWORD_ID_END2 3
-
-#define PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END 33
-#define PLUGINSD_KEYWORD_ID_RBEGIN 22
-#define PLUGINSD_KEYWORD_ID_RDSTATE 23
-#define PLUGINSD_KEYWORD_ID_REND 25
-#define PLUGINSD_KEYWORD_ID_RSET 21
-#define PLUGINSD_KEYWORD_ID_RSSTATE 24
-
-#define PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE 901
-#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE 902
-#define PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB 903
-#define PLUGINSD_KEYWORD_ID_DYNCFG_RESET 904
-#define PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS 905
-#define PLUGINSD_KEYWORD_ID_DELETE_JOB 906
-
-
-#define GPERF_PARSER_TOTAL_KEYWORDS 37
-#define GPERF_PARSER_MIN_WORD_LENGTH 3
-#define GPERF_PARSER_MAX_WORD_LENGTH 22
-#define GPERF_PARSER_MIN_HASH_VALUE 7
-#define GPERF_PARSER_MAX_HASH_VALUE 52
-/* maximum key range = 46, duplicates = 0 */
-
-#ifdef __GNUC__
-__inline
-#else
-#ifdef __cplusplus
-inline
-#endif
-#endif
-static unsigned int
-gperf_keyword_hash_function (register const char *str, register size_t len)
-{
- static const unsigned char asso_values[] =
- {
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 6, 24, 3, 9, 6,
- 0, 53, 3, 27, 53, 53, 33, 53, 42, 0,
- 53, 53, 0, 30, 53, 12, 3, 53, 9, 0,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 53,
- 53, 53, 53, 53, 53, 53
- };
- return len + asso_values[(unsigned char)str[1]] + asso_values[(unsigned char)str[0]];
-}
-
-static const PARSER_KEYWORD gperf_keywords[] =
- {
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
-#line 67 "gperf-config.txt"
- {"HOST", PLUGINSD_KEYWORD_ID_HOST, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 4},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
-#line 87 "gperf-config.txt"
- {"CONFIG", PLUGINSD_KEYWORD_ID_CONFIG, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 21},
-#line 101 "gperf-config.txt"
- {"REND", PLUGINSD_KEYWORD_ID_REND, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 29},
-#line 75 "gperf-config.txt"
- {"CHART", PLUGINSD_KEYWORD_ID_CHART, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 9},
-#line 84 "gperf-config.txt"
- {"OVERWRITE", PLUGINSD_KEYWORD_ID_OVERWRITE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 18},
-#line 70 "gperf-config.txt"
- {"HOST_LABEL", PLUGINSD_KEYWORD_ID_HOST_LABEL, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 7},
-#line 68 "gperf-config.txt"
- {"HOST_DEFINE", PLUGINSD_KEYWORD_ID_HOST_DEFINE, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 5},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
-#line 100 "gperf-config.txt"
- {"RDSTATE", PLUGINSD_KEYWORD_ID_RDSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 28},
-#line 86 "gperf-config.txt"
- {"VARIABLE", PLUGINSD_KEYWORD_ID_VARIABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 20},
-#line 69 "gperf-config.txt"
- {"HOST_DEFINE_END", PLUGINSD_KEYWORD_ID_HOST_DEFINE_END, PARSER_INIT_PLUGINSD|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 6},
-#line 66 "gperf-config.txt"
- {"EXIT", PLUGINSD_KEYWORD_ID_EXIT, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 3},
-#line 80 "gperf-config.txt"
- {"FUNCTION", PLUGINSD_KEYWORD_ID_FUNCTION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 14},
-#line 110 "gperf-config.txt"
- {"DYNCFG_RESET", PLUGINSD_KEYWORD_ID_DYNCFG_RESET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 35},
-#line 107 "gperf-config.txt"
- {"DYNCFG_ENABLE", PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 32},
-#line 111 "gperf-config.txt"
- {"REPORT_JOB_STATUS", PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 36},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
-#line 112 "gperf-config.txt"
- {"DELETE_JOB", PLUGINSD_KEYWORD_ID_DELETE_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 37},
-#line 98 "gperf-config.txt"
- {"CHART_DEFINITION_END", PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 26},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
-#line 109 "gperf-config.txt"
- {"DYNCFG_REGISTER_JOB", PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 34},
-#line 82 "gperf-config.txt"
- {"FUNCTION_PROGRESS", PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 16},
-#line 99 "gperf-config.txt"
- {"RBEGIN", PLUGINSD_KEYWORD_ID_RBEGIN, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 27},
-#line 108 "gperf-config.txt"
- {"DYNCFG_REGISTER_MODULE", PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 33},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
-#line 81 "gperf-config.txt"
- {"FUNCTION_RESULT_BEGIN", PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 15},
-#line 102 "gperf-config.txt"
- {"RSET", PLUGINSD_KEYWORD_ID_RSET, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 30},
-#line 74 "gperf-config.txt"
- {"BEGIN", PLUGINSD_KEYWORD_ID_BEGIN, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 8},
-#line 92 "gperf-config.txt"
- {"BEGIN2", PLUGINSD_KEYWORD_ID_BEGIN2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 23},
-#line 103 "gperf-config.txt"
- {"RSSTATE", PLUGINSD_KEYWORD_ID_RSSTATE, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 31},
-#line 64 "gperf-config.txt"
- {"FLUSH", PLUGINSD_KEYWORD_ID_FLUSH, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 1},
-#line 85 "gperf-config.txt"
- {"SET", PLUGINSD_KEYWORD_ID_SET, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 19},
-#line 93 "gperf-config.txt"
- {"SET2", PLUGINSD_KEYWORD_ID_SET2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 24},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
-#line 76 "gperf-config.txt"
- {"CLABEL", PLUGINSD_KEYWORD_ID_CLABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 10},
-#line 65 "gperf-config.txt"
- {"DISABLE", PLUGINSD_KEYWORD_ID_DISABLE, PARSER_INIT_PLUGINSD, WORKER_PARSER_FIRST_JOB + 2},
-#line 83 "gperf-config.txt"
- {"LABEL", PLUGINSD_KEYWORD_ID_LABEL, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 17},
-#line 78 "gperf-config.txt"
- {"DIMENSION", PLUGINSD_KEYWORD_ID_DIMENSION, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 12},
-#line 91 "gperf-config.txt"
- {"CLAIMED_ID", PLUGINSD_KEYWORD_ID_CLAIMED_ID, PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 22},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
-#line 77 "gperf-config.txt"
- {"CLABEL_COMMIT", PLUGINSD_KEYWORD_ID_CLABEL_COMMIT, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING|PARSER_REP_METADATA, WORKER_PARSER_FIRST_JOB + 11},
- {(char*)0,0,PARSER_INIT_PLUGINSD,0},
-#line 79 "gperf-config.txt"
- {"END", PLUGINSD_KEYWORD_ID_END, PARSER_INIT_PLUGINSD|PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 13},
-#line 94 "gperf-config.txt"
- {"END2", PLUGINSD_KEYWORD_ID_END2, PARSER_INIT_STREAMING, WORKER_PARSER_FIRST_JOB + 25}
- };
-
-const PARSER_KEYWORD *
-gperf_lookup_keyword (register const char *str, register size_t len)
-{
- if (len <= GPERF_PARSER_MAX_WORD_LENGTH && len >= GPERF_PARSER_MIN_WORD_LENGTH)
- {
- register unsigned int key = gperf_keyword_hash_function (str, len);
-
- if (key <= GPERF_PARSER_MAX_HASH_VALUE)
- {
- register const char *s = gperf_keywords[key].keyword;
-
- if (s && *str == *s && !strcmp (str + 1, s + 1))
- return &gperf_keywords[key];
- }
- }
- return 0;
-}
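For orientation, gperf_lookup_keyword() above is the perfect-hash entry point the parser uses to map the first word of an incoming line to its PARSER_KEYWORD entry. A minimal caller-side sketch (assuming <string.h> is included; the real dispatch lives in the parser sources further below) would be:

    /* Illustrative only - look up one token through the generated perfect hash. */
    const char *word = "BEGIN";   /* first word of a received protocol line */
    const PARSER_KEYWORD *kw = gperf_lookup_keyword(word, strlen(word));
    if (kw) {
        /* matched: the entry carries the PLUGINSD_KEYWORD_ID_* value and the
           repertoire/worker-job flags from the table above */
    }
    else {
        /* not a known keyword - the line is rejected by the parser */
    }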
diff --git a/src/collectors/plugins.d/plugins_d.c b/src/collectors/plugins.d/plugins_d.c
deleted file mode 100644
index 85f1563c3..000000000
--- a/src/collectors/plugins.d/plugins_d.c
+++ /dev/null
@@ -1,350 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "plugins_d.h"
-#include "pluginsd_parser.h"
-
-char *plugin_directories[PLUGINSD_MAX_DIRECTORIES] = { [0] = PLUGINS_DIR, };
-struct plugind *pluginsd_root = NULL;
-
-static inline void pluginsd_sleep(const int seconds) {
- int timeout_ms = seconds * 1000;
- int waited_ms = 0;
- while(waited_ms < timeout_ms) {
- if(!service_running(SERVICE_COLLECTORS)) break;
- sleep_usec(ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS * USEC_PER_MS);
- waited_ms += ND_CHECK_CANCELLABILITY_WHILE_WAITING_EVERY_MS;
- }
-}
-
-inline size_t pluginsd_initialize_plugin_directories()
-{
- char plugins_dirs[(FILENAME_MAX * 2) + 1];
- static char *plugins_dir_list = NULL;
-
- // Get the configuration entry
- if (likely(!plugins_dir_list)) {
- snprintfz(plugins_dirs, FILENAME_MAX * 2, "\"%s\" \"%s/custom-plugins.d\"", PLUGINS_DIR, CONFIG_DIR);
- plugins_dir_list = strdupz(config_get(CONFIG_SECTION_DIRECTORIES, "plugins", plugins_dirs));
- }
-
- // Parse it and store it to plugin directories
- return quoted_strings_splitter_config(plugins_dir_list, plugin_directories, PLUGINSD_MAX_DIRECTORIES);
-}
-
-static inline void plugin_set_disabled(struct plugind *cd) {
- spinlock_lock(&cd->unsafe.spinlock);
- cd->unsafe.enabled = false;
- spinlock_unlock(&cd->unsafe.spinlock);
-}
-
-bool plugin_is_enabled(struct plugind *cd) {
- spinlock_lock(&cd->unsafe.spinlock);
- bool ret = cd->unsafe.enabled;
- spinlock_unlock(&cd->unsafe.spinlock);
- return ret;
-}
-
-static inline void plugin_set_running(struct plugind *cd) {
- spinlock_lock(&cd->unsafe.spinlock);
- cd->unsafe.running = true;
- spinlock_unlock(&cd->unsafe.spinlock);
-}
-
-static inline bool plugin_is_running(struct plugind *cd) {
- spinlock_lock(&cd->unsafe.spinlock);
- bool ret = cd->unsafe.running;
- spinlock_unlock(&cd->unsafe.spinlock);
- return ret;
-}
-
-static void pluginsd_worker_thread_cleanup(void *pptr) {
- struct plugind *cd = CLEANUP_FUNCTION_GET_PTR(pptr);
- if(!cd) return;
-
- worker_unregister();
-
- spinlock_lock(&cd->unsafe.spinlock);
-
- cd->unsafe.running = false;
- cd->unsafe.thread = 0;
-
- cd->unsafe.pid = 0;
-
- POPEN_INSTANCE *pi = cd->unsafe.pi;
- cd->unsafe.pi = NULL;
-
- spinlock_unlock(&cd->unsafe.spinlock);
-
- if (pi)
- spawn_popen_kill(pi);
-}
-
-#define SERIAL_FAILURES_THRESHOLD 10
-static void pluginsd_worker_thread_handle_success(struct plugind *cd) {
- if (likely(cd->successful_collections)) {
- pluginsd_sleep(cd->update_every);
- return;
- }
-
- if (likely(cd->serial_failures <= SERIAL_FAILURES_THRESHOLD)) {
- netdata_log_info("PLUGINSD: 'host:%s', '%s' (pid %d) does not generate useful output but it reports success (exits with 0). %s.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid,
- plugin_is_enabled(cd) ? "Waiting a bit before starting it again." : "Will not start it again - it is now disabled.");
-
- pluginsd_sleep(cd->update_every * 10);
- return;
- }
-
- if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
-        netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) does not generate useful output, "
-                          "although it reports success (exits with 0). "
- "We have tried to collect something %zu times - unsuccessfully. Disabling it.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, cd->serial_failures);
- plugin_set_disabled(cd);
- return;
- }
-}
-
-static void pluginsd_worker_thread_handle_error(struct plugind *cd, int worker_ret_code) {
- if (worker_ret_code == -1) {
- netdata_log_info("PLUGINSD: 'host:%s', '%s' (pid %d) was killed with SIGTERM. Disabling it.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid);
- plugin_set_disabled(cd);
- return;
- }
-
- if (!cd->successful_collections) {
-        netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d and has not collected any data. Disabling it.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code);
- plugin_set_disabled(cd);
- return;
- }
-
- if (cd->serial_failures <= SERIAL_FAILURES_THRESHOLD) {
- netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). %s",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code, cd->successful_collections,
- plugin_is_enabled(cd) ? "Waiting a bit before starting it again." : "Will not start it again - it is disabled.");
-
- pluginsd_sleep(cd->update_every * 10);
- return;
- }
-
- if (cd->serial_failures > SERIAL_FAILURES_THRESHOLD) {
-        netdata_log_error("PLUGINSD: 'host:%s', '%s' (pid %d) exited with error code %d, but has given useful output in the past (%zu times). "
- "We tried to restart it %zu times, but it failed to generate data. Disabling it.",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, worker_ret_code,
- cd->successful_collections, cd->serial_failures);
- plugin_set_disabled(cd);
- return;
- }
-}
-
-#undef SERIAL_FAILURES_THRESHOLD
-
-static void *pluginsd_worker_thread(void *arg) {
- struct plugind *cd = (struct plugind *) arg;
- CLEANUP_FUNCTION_REGISTER(pluginsd_worker_thread_cleanup) cleanup_ptr = cd;
-
- worker_register("PLUGINSD");
-
- plugin_set_running(cd);
-
- size_t count = 0;
-
- while(service_running(SERVICE_COLLECTORS)) {
- cd->unsafe.pi = spawn_popen_run(cd->cmd);
- if(!cd->unsafe.pi) {
- netdata_log_error("PLUGINSD: 'host:%s', cannot popen(\"%s\", \"r\").",
- rrdhost_hostname(cd->host), cd->cmd);
- break;
- }
- cd->unsafe.pid = spawn_server_instance_pid(cd->unsafe.pi->si);
-
- nd_log(NDLS_DAEMON, NDLP_DEBUG,
- "PLUGINSD: 'host:%s' connected to '%s' running on pid %d",
- rrdhost_hostname(cd->host),
- cd->fullfilename, cd->unsafe.pid);
-
- const char *plugin = strrchr(cd->fullfilename, '/');
- if(plugin)
- plugin++;
- else
- plugin = cd->fullfilename;
-
- char module[100];
- snprintfz(module, sizeof(module), "plugins.d[%s]", plugin);
- ND_LOG_STACK lgs[] = {
- ND_LOG_FIELD_TXT(NDF_MODULE, module),
- ND_LOG_FIELD_TXT(NDF_NIDL_NODE, rrdhost_hostname(cd->host)),
- ND_LOG_FIELD_TXT(NDF_SRC_TRANSPORT, "pluginsd"),
- ND_LOG_FIELD_END(),
- };
- ND_LOG_STACK_PUSH(lgs);
-
- count = pluginsd_process(cd->host, cd, cd->unsafe.pi->child_stdin_fp, cd->unsafe.pi->child_stdout_fp, 0);
-
- nd_log(NDLS_DAEMON, NDLP_DEBUG,
- "PLUGINSD: 'host:%s', '%s' (pid %d) disconnected after %zu successful data collections (ENDs).",
- rrdhost_hostname(cd->host), cd->fullfilename, cd->unsafe.pid, count);
-
- int worker_ret_code = spawn_popen_kill(cd->unsafe.pi);
- cd->unsafe.pi = NULL;
-
- if(likely(worker_ret_code == 0))
- pluginsd_worker_thread_handle_success(cd);
- else
- pluginsd_worker_thread_handle_error(cd, worker_ret_code);
-
- cd->unsafe.pid = 0;
-
- if(unlikely(!plugin_is_enabled(cd)))
- break;
- }
- return NULL;
-}
-
-static void pluginsd_main_cleanup(void *pptr) {
- struct netdata_static_thread *static_thread = CLEANUP_FUNCTION_GET_PTR(pptr);
- if(!static_thread) return;
-
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
- netdata_log_info("PLUGINSD: cleaning up...");
-
- struct plugind *cd;
- for (cd = pluginsd_root; cd; cd = cd->next) {
- spinlock_lock(&cd->unsafe.spinlock);
- if (cd->unsafe.enabled && cd->unsafe.running && cd->unsafe.thread != 0) {
- netdata_log_info("PLUGINSD: 'host:%s', stopping plugin thread: %s",
- rrdhost_hostname(cd->host), cd->id);
-
- nd_thread_signal_cancel(cd->unsafe.thread);
- }
- spinlock_unlock(&cd->unsafe.spinlock);
- }
-
- netdata_log_info("PLUGINSD: cleanup completed.");
- static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
-
- worker_unregister();
-}
-
-void *pluginsd_main(void *ptr) {
- CLEANUP_FUNCTION_REGISTER(pluginsd_main_cleanup) cleanup_ptr = ptr;
-
- int automatic_run = config_get_boolean(CONFIG_SECTION_PLUGINS, "enable running new plugins", 1);
- int scan_frequency = (int)config_get_number(CONFIG_SECTION_PLUGINS, "check for new plugins every", 60);
- if (scan_frequency < 1)
- scan_frequency = 1;
-
- // disable some plugins by default
- config_get_boolean(CONFIG_SECTION_PLUGINS, "slabinfo", CONFIG_BOOLEAN_NO);
- // it crashes (both threads) on Alpine after we made it multi-threaded
- // works with "--device /dev/ipmi0", but this is not default
- // see https://github.com/netdata/netdata/pull/15564 for details
- if (getenv("NETDATA_LISTENER_PORT"))
- config_get_boolean(CONFIG_SECTION_PLUGINS, "freeipmi", CONFIG_BOOLEAN_NO);
-
- // store the errno for each plugins directory
- // so that we don't log broken directories on each loop
- int directory_errors[PLUGINSD_MAX_DIRECTORIES] = { 0 };
-
- while (service_running(SERVICE_COLLECTORS)) {
- int idx;
- const char *directory_name;
-
- for (idx = 0; idx < PLUGINSD_MAX_DIRECTORIES && (directory_name = plugin_directories[idx]); idx++) {
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- errno_clear();
- DIR *dir = opendir(directory_name);
- if (unlikely(!dir)) {
- if (directory_errors[idx] != errno) {
- directory_errors[idx] = errno;
- netdata_log_error("cannot open plugins directory '%s'", directory_name);
- }
- continue;
- }
-
- struct dirent *file = NULL;
- while (likely((file = readdir(dir)))) {
- if (unlikely(!service_running(SERVICE_COLLECTORS)))
- break;
-
- netdata_log_debug(D_PLUGINSD, "examining file '%s'", file->d_name);
-
- if (unlikely(strcmp(file->d_name, ".") == 0 || strcmp(file->d_name, "..") == 0))
- continue;
-
- int len = (int)strlen(file->d_name);
- if (unlikely(len <= (int)PLUGINSD_FILE_SUFFIX_LEN))
- continue;
- if (unlikely(strcmp(PLUGINSD_FILE_SUFFIX, &file->d_name[len - (int)PLUGINSD_FILE_SUFFIX_LEN]) != 0)) {
- netdata_log_debug(D_PLUGINSD, "file '%s' does not end in '%s'", file->d_name, PLUGINSD_FILE_SUFFIX);
- continue;
- }
-
- char pluginname[CONFIG_MAX_NAME + 1];
- snprintfz(pluginname, CONFIG_MAX_NAME, "%.*s", (int)(len - PLUGINSD_FILE_SUFFIX_LEN), file->d_name);
- int enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, pluginname, automatic_run);
-
- if (unlikely(!enabled)) {
- netdata_log_debug(D_PLUGINSD, "plugin '%s' is not enabled", file->d_name);
- continue;
- }
-
- // check if it runs already
- struct plugind *cd;
- for (cd = pluginsd_root; cd; cd = cd->next)
- if (unlikely(strcmp(cd->filename, file->d_name) == 0))
- break;
-
- if (likely(cd && plugin_is_running(cd))) {
- netdata_log_debug(D_PLUGINSD, "plugin '%s' is already running", cd->filename);
- continue;
- }
-
- // it is not running
- // allocate a new one, or use the obsolete one
- if (unlikely(!cd)) {
- cd = callocz(sizeof(struct plugind), 1);
-
- snprintfz(cd->id, CONFIG_MAX_NAME, "plugin:%s", pluginname);
-
- strncpyz(cd->filename, file->d_name, FILENAME_MAX);
- snprintfz(cd->fullfilename, FILENAME_MAX, "%s/%s", directory_name, cd->filename);
-
- cd->host = localhost;
- cd->unsafe.enabled = enabled;
- cd->unsafe.running = false;
-
- cd->update_every = (int)config_get_number(cd->id, "update every", localhost->rrd_update_every);
- cd->started_t = now_realtime_sec();
-
- char *def = "";
- snprintfz(
- cd->cmd, PLUGINSD_CMD_MAX, "exec %s %d %s", cd->fullfilename, cd->update_every,
- config_get(cd->id, "command options", def));
-
- // link it
- DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(pluginsd_root, cd, prev, next);
-
- if (plugin_is_enabled(cd)) {
- char tag[NETDATA_THREAD_TAG_MAX + 1];
- snprintfz(tag, NETDATA_THREAD_TAG_MAX, "PD[%s]", pluginname);
-
- // spawn a new thread for it
- cd->unsafe.thread = nd_thread_create(tag, NETDATA_THREAD_OPTION_DEFAULT,
- pluginsd_worker_thread, cd);
- }
- }
- }
-
- closedir(dir);
- }
-
- pluginsd_sleep(scan_frequency);
- }
-
- return NULL;
-}
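The discovery loop above is driven entirely by netdata.conf. A configuration fragment exercising the options it reads (the plugin name "example" is hypothetical) would look like:

    [plugins]
        enable running new plugins = yes
        check for new plugins every = 60
        # one boolean per discovered *.plugin file, keyed by the file name
        # without its .plugin suffix:
        example = yes

    [plugin:example]
        update every = 1
        command options =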
diff --git a/src/collectors/plugins.d/plugins_d.h b/src/collectors/plugins.d/plugins_d.h
deleted file mode 100644
index 51efa5a72..000000000
--- a/src/collectors/plugins.d/plugins_d.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINS_D_H
-#define NETDATA_PLUGINS_D_H 1
-
-#include "daemon/common.h"
-
-#define PLUGINSD_FILE_SUFFIX ".plugin"
-#define PLUGINSD_FILE_SUFFIX_LEN strlen(PLUGINSD_FILE_SUFFIX)
-#define PLUGINSD_CMD_MAX (FILENAME_MAX*2)
-#define PLUGINSD_STOCK_PLUGINS_DIRECTORY_PATH 0
-
-#define PLUGINSD_MAX_DIRECTORIES 20
-extern char *plugin_directories[PLUGINSD_MAX_DIRECTORIES];
-
-struct plugind {
- char id[CONFIG_MAX_NAME+1]; // config node id
-
- char filename[FILENAME_MAX+1]; // just the filename
- char fullfilename[FILENAME_MAX+1]; // with path
- char cmd[PLUGINSD_CMD_MAX+1]; // the command that it executes
-
- size_t successful_collections; // the number of times we have seen
- // values collected from this plugin
-
- size_t serial_failures; // the number of times the plugin started
- // without collecting values
-
- RRDHOST *host; // the host the plugin collects data for
- int update_every; // the plugin default data collection frequency
-
- struct {
- SPINLOCK spinlock;
- bool running; // do not touch this structure after setting this to 1
- bool enabled; // if this is enabled or not
- ND_THREAD *thread;
- POPEN_INSTANCE *pi;
- pid_t pid;
- } unsafe;
-
- time_t started_t;
-
- struct plugind *prev;
- struct plugind *next;
-};
-
-extern struct plugind *pluginsd_root;
-
-size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations);
-void pluginsd_process_thread_cleanup(void *pptr);
-
-size_t pluginsd_initialize_plugin_directories();
-
-#endif /* NETDATA_PLUGINS_D_H */
diff --git a/src/collectors/plugins.d/pluginsd_dyncfg.c b/src/collectors/plugins.d/pluginsd_dyncfg.c
deleted file mode 100644
index c4dd42a73..000000000
--- a/src/collectors/plugins.d/pluginsd_dyncfg.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "pluginsd_dyncfg.h"
-
-
-// ----------------------------------------------------------------------------
-
-PARSER_RC pluginsd_config(char **words, size_t num_words, PARSER *parser) {
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CONFIG);
- if(!host) return PARSER_RC_ERROR;
-
- size_t i = 1;
- char *id = get_word(words, num_words, i++);
- char *action = get_word(words, num_words, i++);
-
- if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_CREATE) == 0) {
- char *status_str = get_word(words, num_words, i++);
- char *type_str = get_word(words, num_words, i++);
- char *path = get_word(words, num_words, i++);
- char *source_type_str = get_word(words, num_words, i++);
- char *source = get_word(words, num_words, i++);
- char *supported_cmds_str = get_word(words, num_words, i++);
- char *view_permissions_str = get_word(words, num_words, i++);
- char *edit_permissions_str = get_word(words, num_words, i++);
-
- DYNCFG_STATUS status = dyncfg_status2id(status_str);
- DYNCFG_TYPE type = dyncfg_type2id(type_str);
- DYNCFG_SOURCE_TYPE source_type = dyncfg_source_type2id(source_type_str);
- DYNCFG_CMDS cmds = dyncfg_cmds2id(supported_cmds_str);
- HTTP_ACCESS view_access = http_access_from_hex(view_permissions_str);
- HTTP_ACCESS edit_access = http_access_from_hex(edit_permissions_str);
-
- if(!dyncfg_add_low_level(
- host,
- id,
- path,
- status,
- type,
- source_type,
- source,
- cmds,
- 0,
- 0,
- false,
- view_access,
- edit_access,
- pluginsd_function_execute_cb,
- parser))
- return PARSER_RC_ERROR;
- }
- else if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_DELETE) == 0) {
- dyncfg_del_low_level(host, id);
- }
- else if(strcmp(action, PLUGINSD_KEYWORD_CONFIG_ACTION_STATUS) == 0) {
- char *status_str = get_word(words, num_words, i++);
- dyncfg_status_low_level(host, id, dyncfg_status2id(status_str));
- }
- else
- nd_log(NDLS_COLLECTORS, NDLP_WARNING, "DYNCFG: unknown action '%s' received from plugin", action);
-
- parser->user.data_collections_count++;
- return PARSER_RC_OK;
-}
-
-// ----------------------------------------------------------------------------
-
-PARSER_RC pluginsd_dyncfg_noop(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
- return PARSER_RC_OK;
-}
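For reference, pluginsd_config() above consumes one CONFIG line per action. Based on the word order it parses, and assuming the action macros expand to the lowercase words create, delete and status (their definitions are not part of this patch), the accepted shapes are:

    CONFIG <id> create <status> <type> <path> <source_type> <source> <supported_cmds> <view_access_hex> <edit_access_hex>
    CONFIG <id> delete
    CONFIG <id> status <status>

All placeholders are illustrative; the status, type, source-type and command values are translated by the dyncfg_*2id() helpers shown above.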
diff --git a/src/collectors/plugins.d/pluginsd_dyncfg.h b/src/collectors/plugins.d/pluginsd_dyncfg.h
deleted file mode 100644
index fd35a3c36..000000000
--- a/src/collectors/plugins.d/pluginsd_dyncfg.h
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINSD_DYNCFG_H
-#define NETDATA_PLUGINSD_DYNCFG_H
-
-#include "pluginsd_internals.h"
-
-PARSER_RC pluginsd_config(char **words, size_t num_words, PARSER *parser);
-PARSER_RC pluginsd_dyncfg_noop(char **words, size_t num_words, PARSER *parser);
-
-#endif //NETDATA_PLUGINSD_DYNCFG_H
diff --git a/src/collectors/plugins.d/pluginsd_functions.c b/src/collectors/plugins.d/pluginsd_functions.c
deleted file mode 100644
index 4ea6d4812..000000000
--- a/src/collectors/plugins.d/pluginsd_functions.c
+++ /dev/null
@@ -1,412 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "pluginsd_functions.h"
-
-#define LOG_FUNCTIONS false
-
-// ----------------------------------------------------------------------------
-// execution of functions
-
-static void inflight_functions_insert_callback(const DICTIONARY_ITEM *item, void *func, void *parser_ptr) {
- struct inflight_function *pf = func;
-
- PARSER *parser = parser_ptr;
-
-    // leave this as the default response code, so that if the dictionary entry is destroyed before a result arrives, this is what is sent back to the caller
- pf->code = HTTP_RESP_SERVICE_UNAVAILABLE;
-
- const char *transaction = dictionary_acquired_item_name(item);
-
- int rc = uuid_parse_flexi(transaction, pf->transaction);
- if(rc != 0)
- netdata_log_error("FUNCTION: '%s': cannot parse transaction UUID", string2str(pf->function));
-
- CLEAN_BUFFER *buffer = buffer_create(1024, NULL);
- if(pf->payload && buffer_strlen(pf->payload)) {
- buffer_sprintf(
- buffer,
- PLUGINSD_CALL_FUNCTION_PAYLOAD_BEGIN " %s %d \"%s\" \""HTTP_ACCESS_FORMAT"\" \"%s\" \"%s\"\n",
- transaction,
- pf->timeout_s,
- string2str(pf->function),
- (HTTP_ACCESS_FORMAT_CAST)pf->access,
- pf->source ? pf->source : "",
- content_type_id2string(pf->payload->content_type)
- );
-
- buffer_fast_strcat(buffer, buffer_tostring(pf->payload), buffer_strlen(pf->payload));
- buffer_strcat(buffer, "\nFUNCTION_PAYLOAD_END\n");
- }
- else {
- buffer_sprintf(
- buffer,
- PLUGINSD_CALL_FUNCTION " %s %d \"%s\" \""HTTP_ACCESS_FORMAT"\" \"%s\"\n",
- transaction,
- pf->timeout_s,
- string2str(pf->function),
- (HTTP_ACCESS_FORMAT_CAST)pf->access,
- pf->source ? pf->source : ""
- );
- }
-
- // send the command to the plugin
- // IMPORTANT: make sure all commands are sent in 1 call, because in streaming they may interfere with others
- ssize_t ret = send_to_plugin(buffer_tostring(buffer), parser);
- pf->sent_monotonic_ut = now_monotonic_usec();
-
- if(ret < 0) {
- pf->sent_successfully = false;
-
- pf->code = HTTP_RESP_SERVICE_UNAVAILABLE;
- netdata_log_error("FUNCTION '%s': failed to send it to the plugin, error %zd", string2str(pf->function), ret);
- rrd_call_function_error(pf->result_body_wb, "Failed to communicate with collector", pf->code);
- }
- else {
- pf->sent_successfully = true;
-
- internal_error(LOG_FUNCTIONS,
- "FUNCTION '%s' with transaction '%s' sent to collector (%zd bytes, in %"PRIu64" usec)",
- string2str(pf->function), dictionary_acquired_item_name(item), ret,
- pf->sent_monotonic_ut - pf->started_monotonic_ut);
- }
-}
-
-static bool inflight_functions_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func __maybe_unused, void *new_func, void *parser_ptr __maybe_unused) {
- struct inflight_function *pf = new_func;
-
- netdata_log_error("PLUGINSD_PARSER: duplicate UUID on pending function '%s' detected. Ignoring the second one.", string2str(pf->function));
- pf->code = rrd_call_function_error(pf->result_body_wb, "This request is already in progress", HTTP_RESP_BAD_REQUEST);
- pf->result.cb(pf->result_body_wb, pf->code, pf->result.data);
- string_freez(pf->function);
-
- return false;
-}
-
-static void inflight_functions_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *func, void *parser_ptr) {
- struct inflight_function *pf = func;
- struct parser *parser = (struct parser *)parser_ptr; (void)parser;
-
- internal_error(LOG_FUNCTIONS,
- "FUNCTION '%s' result of transaction '%s' received from collector "
- "(%zu bytes, request %"PRIu64" usec, response %"PRIu64" usec)",
- string2str(pf->function), dictionary_acquired_item_name(item),
- buffer_strlen(pf->result_body_wb),
- pf->sent_monotonic_ut - pf->started_monotonic_ut, now_realtime_usec() - pf->sent_monotonic_ut);
-
- if(pf->code == HTTP_RESP_SERVICE_UNAVAILABLE && !buffer_strlen(pf->result_body_wb))
- rrd_call_function_error(pf->result_body_wb, "The plugin exited while servicing this call.", pf->code);
-
- pf->result.cb(pf->result_body_wb, pf->code, pf->result.data);
-
- string_freez(pf->function);
- buffer_free((void *)pf->payload);
- freez((void *)pf->source);
-}
-
-void pluginsd_inflight_functions_init(PARSER *parser) {
- parser->inflight.functions = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE, &dictionary_stats_category_functions, 0);
- dictionary_register_insert_callback(parser->inflight.functions, inflight_functions_insert_callback, parser);
- dictionary_register_delete_callback(parser->inflight.functions, inflight_functions_delete_callback, parser);
- dictionary_register_conflict_callback(parser->inflight.functions, inflight_functions_conflict_callback, parser);
-}
-
-void pluginsd_inflight_functions_cleanup(PARSER *parser) {
- dictionary_destroy(parser->inflight.functions);
-}
-
-// ----------------------------------------------------------------------------
-
-void pluginsd_inflight_functions_garbage_collect(PARSER *parser, usec_t now_ut) {
- parser->inflight.smaller_monotonic_timeout_ut = 0;
- struct inflight_function *pf;
- dfe_start_write(parser->inflight.functions, pf) {
- if (*pf->stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < now_ut) {
- internal_error(true,
- "FUNCTION '%s' removing expired transaction '%s', after %"PRIu64" usec.",
- string2str(pf->function), pf_dfe.name, now_ut - pf->started_monotonic_ut);
-
- if(!buffer_strlen(pf->result_body_wb) || pf->code == HTTP_RESP_OK)
- pf->code = rrd_call_function_error(pf->result_body_wb,
- "Timeout waiting for collector response.",
- HTTP_RESP_GATEWAY_TIMEOUT);
-
- dictionary_del(parser->inflight.functions, pf_dfe.name);
- }
-
- else if(!parser->inflight.smaller_monotonic_timeout_ut || *pf->stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut)
- parser->inflight.smaller_monotonic_timeout_ut = *pf->stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT;
- }
- dfe_done(pf);
-}
-
-// ----------------------------------------------------------------------------
-
-static void pluginsd_function_cancel(void *data) {
- struct inflight_function *look_for = data, *t;
-
- bool sent = false;
- dfe_start_read(look_for->parser->inflight.functions, t) {
- if(look_for == t) {
- const char *transaction = t_dfe.name;
-
- internal_error(true, "PLUGINSD: sending function cancellation to plugin for transaction '%s'", transaction);
-
- char buffer[2048];
- snprintfz(buffer, sizeof(buffer), PLUGINSD_CALL_FUNCTION_CANCEL " %s\n", transaction);
-
- // send the command to the plugin
- ssize_t ret = send_to_plugin(buffer, t->parser);
- if(ret < 0)
- sent = true;
-
- break;
- }
- }
- dfe_done(t);
-
- if(sent <= 0)
- nd_log(NDLS_DAEMON, NDLP_DEBUG,
-               "PLUGINSD: FUNCTION_CANCEL request didn't match any pending function requests in plugins.d.");
-}
-
-static void pluginsd_function_progress_to_plugin(void *data) {
- struct inflight_function *look_for = data, *t;
-
- bool sent = false;
- dfe_start_read(look_for->parser->inflight.functions, t) {
- if(look_for == t) {
- const char *transaction = t_dfe.name;
-
- internal_error(true, "PLUGINSD: sending function progress to plugin for transaction '%s'", transaction);
-
- char buffer[2048];
- snprintfz(buffer, sizeof(buffer), PLUGINSD_CALL_FUNCTION_PROGRESS " %s\n", transaction);
-
- // send the command to the plugin
- ssize_t ret = send_to_plugin(buffer, t->parser);
- if(ret < 0)
- sent = true;
-
- break;
- }
- }
- dfe_done(t);
-
- if(sent <= 0)
- nd_log(NDLS_DAEMON, NDLP_DEBUG,
-               "PLUGINSD: FUNCTION_PROGRESS request didn't match any pending function requests in plugins.d.");
-}
-
-// this is the function called from
-// rrd_call_function_and_wait() and rrd_call_function_async()
-int pluginsd_function_execute_cb(struct rrd_function_execute *rfe, void *data) {
-
- // IMPORTANT: this function MUST call the result_cb even on failures
-
- PARSER *parser = data;
-
- usec_t now_ut = now_monotonic_usec();
-
- int timeout_s = (int)((*rfe->stop_monotonic_ut - now_ut + USEC_PER_SEC / 2) / USEC_PER_SEC);
-
- struct inflight_function tmp = {
- .started_monotonic_ut = now_ut,
- .stop_monotonic_ut = rfe->stop_monotonic_ut,
- .result_body_wb = rfe->result.wb,
- .timeout_s = timeout_s,
- .function = string_strdupz(rfe->function),
- .payload = buffer_dup(rfe->payload),
- .access = rfe->user_access,
- .source = rfe->source ? strdupz(rfe->source) : NULL,
- .parser = parser,
-
- .result = {
- .cb = rfe->result.cb,
- .data = rfe->result.data,
- },
- .progress = {
- .cb = rfe->progress.cb,
- .data = rfe->progress.data,
- },
- };
- uuid_copy(tmp.transaction, *rfe->transaction);
-
- char transaction_str[UUID_COMPACT_STR_LEN];
- uuid_unparse_lower_compact(tmp.transaction, transaction_str);
-
- dictionary_write_lock(parser->inflight.functions);
-
- // if there is any error, our dictionary callbacks will call the caller callback to notify
- // the caller about the error - no need for error handling here.
- struct inflight_function *t = dictionary_set(parser->inflight.functions, transaction_str, &tmp, sizeof(struct inflight_function));
- if(!t->sent_successfully) {
- int code = t->code;
- dictionary_write_unlock(parser->inflight.functions);
- dictionary_del(parser->inflight.functions, transaction_str);
- pluginsd_inflight_functions_garbage_collect(parser, now_ut);
- return code;
- }
- else {
- if (rfe->register_canceller.cb)
- rfe->register_canceller.cb(rfe->register_canceller.data, pluginsd_function_cancel, t);
-
- if (rfe->register_progresser.cb &&
- (parser->repertoire == PARSER_INIT_PLUGINSD || (parser->repertoire == PARSER_INIT_STREAMING &&
- stream_has_capability(&parser->user, STREAM_CAP_PROGRESS))))
- rfe->register_progresser.cb(rfe->register_progresser.data, pluginsd_function_progress_to_plugin, t);
-
- if (!parser->inflight.smaller_monotonic_timeout_ut ||
- *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT < parser->inflight.smaller_monotonic_timeout_ut)
- parser->inflight.smaller_monotonic_timeout_ut = *tmp.stop_monotonic_ut + RRDFUNCTIONS_TIMEOUT_EXTENSION_UT;
-
- // garbage collect stale inflight functions
- if (parser->inflight.smaller_monotonic_timeout_ut < now_ut)
- pluginsd_inflight_functions_garbage_collect(parser, now_ut);
-
- dictionary_write_unlock(parser->inflight.functions);
-
- return HTTP_RESP_OK;
- }
-}
-
-PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser) {
- // a plugin or a child is registering a function
-
- bool global = false;
- size_t i = 1;
- if(num_words >= 2 && strcmp(get_word(words, num_words, 1), "GLOBAL") == 0) {
- i++;
- global = true;
- }
-
- char *name = get_word(words, num_words, i++);
- char *timeout_str = get_word(words, num_words, i++);
- char *help = get_word(words, num_words, i++);
- char *tags = get_word(words, num_words, i++);
- char *access_str = get_word(words, num_words, i++);
- char *priority_str = get_word(words, num_words, i++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_FUNCTION);
- if(!host) return PARSER_RC_ERROR;
-
- RRDSET *st = (global)? NULL: pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_FUNCTION, PLUGINSD_KEYWORD_CHART);
- if(!st) global = true;
-
- if (unlikely(!timeout_str || !name || !help || (!global && !st))) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a FUNCTION, without providing the required data (global = '%s', name = '%s', timeout = '%s', help = '%s'). Ignoring it.",
- rrdhost_hostname(host),
- st?rrdset_id(st):"(unset)",
- global?"yes":"no",
- name?name:"(unset)",
- timeout_str ? timeout_str : "(unset)",
- help?help:"(unset)"
- );
- return PARSER_RC_ERROR;
- }
-
- int timeout_s = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT;
- if (timeout_str && *timeout_str) {
- timeout_s = str2i(timeout_str);
- if (unlikely(timeout_s <= 0))
- timeout_s = PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT;
- }
-
- int priority = RRDFUNCTIONS_PRIORITY_DEFAULT;
- if(priority_str && *priority_str) {
- priority = str2i(priority_str);
- if(priority <= 0)
- priority = RRDFUNCTIONS_PRIORITY_DEFAULT;
- }
-
- rrd_function_add(host, st, name, timeout_s, priority, help, tags,
- http_access_from_hex_mapping_old_roles(access_str), false,
- pluginsd_function_execute_cb, parser);
-
- parser->user.data_collections_count++;
-
- return PARSER_RC_OK;
-}
-
-static void pluginsd_function_result_end(struct parser *parser, void *action_data) {
- STRING *key = action_data;
- if(key)
- dictionary_del(parser->inflight.functions, string2str(key));
- string_freez(key);
-
- parser->user.data_collections_count++;
-}
-
-static inline struct inflight_function *inflight_function_find(PARSER *parser, const char *transaction) {
- struct inflight_function *pf = NULL;
-
- if(transaction && *transaction)
- pf = (struct inflight_function *)dictionary_get(parser->inflight.functions, transaction);
-
- if(!pf)
-        netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " for transaction '%s', but the transaction was not found.", transaction ? transaction : "(unset)");
-
- return pf;
-}
-
-PARSER_RC pluginsd_function_result_begin(char **words, size_t num_words, PARSER *parser) {
- char *transaction = get_word(words, num_words, 1);
- char *status = get_word(words, num_words, 2);
- char *format = get_word(words, num_words, 3);
- char *expires = get_word(words, num_words, 4);
-
- if (unlikely(!transaction || !*transaction || !status || !*status || !format || !*format || !expires || !*expires)) {
- netdata_log_error("got a " PLUGINSD_KEYWORD_FUNCTION_RESULT_BEGIN " without providing the required data (key = '%s', status = '%s', format = '%s', expires = '%s')."
- , transaction ? transaction : "(unset)"
- , status ? status : "(unset)"
- , format ? format : "(unset)"
- , expires ? expires : "(unset)"
- );
- }
-
- int code = (status && *status) ? str2i(status) : 0;
- if (code <= 0)
- code = HTTP_RESP_BACKEND_RESPONSE_INVALID;
-
- time_t expiration = (expires && *expires) ? str2l(expires) : 0;
-
- struct inflight_function *pf = inflight_function_find(parser, transaction);
- if(pf) {
- if(format && *format)
- pf->result_body_wb->content_type = content_type_string2id(format);
-
- pf->code = code;
-
- pf->result_body_wb->expires = expiration;
- if(expiration <= now_realtime_sec())
- buffer_no_cacheable(pf->result_body_wb);
- else
- buffer_cacheable(pf->result_body_wb);
- }
-
- parser->defer.response = (pf) ? pf->result_body_wb : NULL;
- parser->defer.end_keyword = PLUGINSD_KEYWORD_FUNCTION_RESULT_END;
- parser->defer.action = pluginsd_function_result_end;
-    parser->defer.action_data = string_strdupz(transaction); // it is ok if the key is NULL
- parser->flags |= PARSER_DEFER_UNTIL_KEYWORD;
-
- return PARSER_RC_OK;
-}
-
-PARSER_RC pluginsd_function_progress(char **words, size_t num_words, PARSER *parser) {
- size_t i = 1;
-
- char *transaction = get_word(words, num_words, i++);
- char *done_str = get_word(words, num_words, i++);
- char *all_str = get_word(words, num_words, i++);
-
- struct inflight_function *pf = inflight_function_find(parser, transaction);
- if(pf) {
- size_t done = done_str && *done_str ? str2u(done_str) : 0;
- size_t all = all_str && *all_str ? str2u(all_str) : 0;
-
- if(pf->progress.cb)
- pf->progress.cb(pf->progress.data, done, all);
- }
-
- return PARSER_RC_OK;
-}
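Putting the pieces above together, the round trip for a function call looks roughly as follows. The literal keyword strings behind PLUGINSD_CALL_FUNCTION and the payload variant are defined elsewhere, the '#' lines are annotations rather than protocol, and every value shown is illustrative:

    # plugin registers a function at startup (parsed by pluginsd_function above)
    FUNCTION GLOBAL "my-function" 10 "one line of help" "tag1,tag2" <access-hex> 100

    # agent forwards a call to the plugin (built by inflight_functions_insert_callback)
    FUNCTION <transaction-uuid> 10 "my-function" "<access-hex>" "<source>"

    # plugin replies; the parser defers the body until FUNCTION_RESULT_END
    FUNCTION_RESULT_BEGIN <transaction-uuid> 200 application/json <expires-unix-timestamp>
    { "status": 200, ... }
    FUNCTION_RESULT_END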
diff --git a/src/collectors/plugins.d/pluginsd_functions.h b/src/collectors/plugins.d/pluginsd_functions.h
deleted file mode 100644
index ad47dc23a..000000000
--- a/src/collectors/plugins.d/pluginsd_functions.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINSD_FUNCTIONS_H
-#define NETDATA_PLUGINSD_FUNCTIONS_H
-
-#include "pluginsd_internals.h"
-
-struct inflight_function {
- nd_uuid_t transaction;
-
- int code;
- int timeout_s;
- STRING *function;
- BUFFER *payload;
- HTTP_ACCESS access;
- const char *source;
-
- BUFFER *result_body_wb;
-
- usec_t *stop_monotonic_ut; // pointer to caller data
- usec_t started_monotonic_ut;
- usec_t sent_monotonic_ut;
- PARSER *parser;
-
- bool sent_successfully;
-
- struct {
- rrd_function_result_callback_t cb;
- void *data;
- } result;
-
- struct {
- rrd_function_progress_cb_t cb;
- void *data;
- } progress;
-};
-
-PARSER_RC pluginsd_function(char **words, size_t num_words, PARSER *parser);
-PARSER_RC pluginsd_function_result_begin(char **words, size_t num_words, PARSER *parser);
-PARSER_RC pluginsd_function_progress(char **words, size_t num_words, PARSER *parser);
-
-void pluginsd_inflight_functions_init(PARSER *parser);
-void pluginsd_inflight_functions_cleanup(PARSER *parser);
-void pluginsd_inflight_functions_garbage_collect(PARSER *parser, usec_t now_ut);
-
-int pluginsd_function_execute_cb(struct rrd_function_execute *rfe, void *data);
-
-#endif //NETDATA_PLUGINSD_FUNCTIONS_H
diff --git a/src/collectors/plugins.d/pluginsd_internals.c b/src/collectors/plugins.d/pluginsd_internals.c
deleted file mode 100644
index 31f0f7539..000000000
--- a/src/collectors/plugins.d/pluginsd_internals.c
+++ /dev/null
@@ -1,120 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "pluginsd_internals.h"
-
-ssize_t send_to_plugin(const char *txt, void *data) {
- PARSER *parser = data;
-
- if(!txt || !*txt)
- return 0;
-
-#ifdef ENABLE_H2O
- if(parser->h2o_ctx)
- return h2o_stream_write(parser->h2o_ctx, txt, strlen(txt));
-#endif
-
- errno_clear();
- spinlock_lock(&parser->writer.spinlock);
- ssize_t bytes = -1;
-
-#ifdef ENABLE_HTTPS
- NETDATA_SSL *ssl = parser->ssl_output;
- if(ssl) {
-
- if(SSL_connection(ssl))
- bytes = netdata_ssl_write(ssl, (void *) txt, strlen(txt));
-
- else
- netdata_log_error("PLUGINSD: cannot send command (SSL)");
-
- spinlock_unlock(&parser->writer.spinlock);
- return bytes;
- }
-#endif
-
- if(parser->fp_output) {
-
- bytes = fprintf(parser->fp_output, "%s", txt);
- if(bytes <= 0) {
- netdata_log_error("PLUGINSD: cannot send command (FILE)");
- bytes = -2;
- }
- else
- fflush(parser->fp_output);
-
- spinlock_unlock(&parser->writer.spinlock);
- return bytes;
- }
-
- if(parser->fd != -1) {
- bytes = 0;
- ssize_t total = (ssize_t)strlen(txt);
- ssize_t sent;
-
- do {
- sent = write(parser->fd, &txt[bytes], total - bytes);
- if(sent <= 0) {
- netdata_log_error("PLUGINSD: cannot send command (fd)");
- spinlock_unlock(&parser->writer.spinlock);
- return -3;
- }
- bytes += sent;
- }
- while(bytes < total);
-
- spinlock_unlock(&parser->writer.spinlock);
- return (int)bytes;
- }
-
- spinlock_unlock(&parser->writer.spinlock);
- netdata_log_error("PLUGINSD: cannot send command (no output socket/pipe/file given to plugins.d parser)");
- return -4;
-}
-
-PARSER_RC PLUGINSD_DISABLE_PLUGIN(PARSER *parser, const char *keyword, const char *msg) {
- parser->user.enabled = 0;
-
- if(keyword && msg) {
- nd_log_limit_static_global_var(erl, 1, 0);
- nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_INFO,
- "PLUGINSD: keyword %s: %s", keyword, msg);
- }
-
- return PARSER_RC_ERROR;
-}
-
-void pluginsd_keywords_init(PARSER *parser, PARSER_REPERTOIRE repertoire) {
- parser_init_repertoire(parser, repertoire);
-
- if (repertoire & (PARSER_INIT_PLUGINSD | PARSER_INIT_STREAMING))
- pluginsd_inflight_functions_init(parser);
-}
-
-void parser_destroy(PARSER *parser) {
- if (unlikely(!parser))
- return;
-
- pluginsd_inflight_functions_cleanup(parser);
-
- freez(parser);
-}
-
-
-PARSER *parser_init(struct parser_user_object *user, FILE *fp_input, FILE *fp_output, int fd,
- PARSER_INPUT_TYPE flags, void *ssl __maybe_unused) {
- PARSER *parser;
-
- parser = callocz(1, sizeof(*parser));
- if(user)
- parser->user = *user;
- parser->fd = fd;
- parser->fp_input = fp_input;
- parser->fp_output = fp_output;
-#ifdef ENABLE_HTTPS
- parser->ssl_output = ssl;
-#endif
- parser->flags = flags;
-
- spinlock_init(&parser->writer.spinlock);
- return parser;
-}
diff --git a/src/collectors/plugins.d/pluginsd_internals.h b/src/collectors/plugins.d/pluginsd_internals.h
deleted file mode 100644
index ae7e99427..000000000
--- a/src/collectors/plugins.d/pluginsd_internals.h
+++ /dev/null
@@ -1,355 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINSD_INTERNALS_H
-#define NETDATA_PLUGINSD_INTERNALS_H
-
-#include "pluginsd_parser.h"
-#include "pluginsd_functions.h"
-#include "pluginsd_dyncfg.h"
-#include "pluginsd_replication.h"
-
-#define SERVING_STREAMING(parser) ((parser)->repertoire == PARSER_INIT_STREAMING)
-#define SERVING_PLUGINSD(parser) ((parser)->repertoire == PARSER_INIT_PLUGINSD)
-
-PARSER_RC PLUGINSD_DISABLE_PLUGIN(PARSER *parser, const char *keyword, const char *msg);
-
-ssize_t send_to_plugin(const char *txt, void *data);
-
-static inline RRDHOST *pluginsd_require_scope_host(PARSER *parser, const char *cmd) {
- RRDHOST *host = parser->user.host;
-
- if(unlikely(!host))
-        netdata_log_error("PLUGINSD: command %s requires a host, but no host is set.", cmd);
-
- return host;
-}
-
-static inline RRDSET *pluginsd_require_scope_chart(PARSER *parser, const char *cmd, const char *parent_cmd) {
- RRDSET *st = parser->user.st;
-
- if(unlikely(!st))
-        netdata_log_error("PLUGINSD: command %s requires a chart defined via command %s, but no chart is set.", cmd, parent_cmd);
-
- return st;
-}
-
-static inline RRDSET *pluginsd_get_scope_chart(PARSER *parser) {
- return parser->user.st;
-}
-
-static inline void pluginsd_lock_rrdset_data_collection(PARSER *parser) {
- if(parser->user.st && !parser->user.v2.locked_data_collection) {
- spinlock_lock(&parser->user.st->data_collection_lock);
- parser->user.v2.locked_data_collection = true;
- }
-}
-
-static inline bool pluginsd_unlock_rrdset_data_collection(PARSER *parser) {
- if(parser->user.st && parser->user.v2.locked_data_collection) {
- spinlock_unlock(&parser->user.st->data_collection_lock);
- parser->user.v2.locked_data_collection = false;
- return true;
- }
-
- return false;
-}
-
-static inline void pluginsd_unlock_previous_scope_chart(PARSER *parser, const char *keyword, bool stale) {
- if(unlikely(pluginsd_unlock_rrdset_data_collection(parser))) {
- if(stale)
-            netdata_log_error("PLUGINSD: 'host:%s/chart:%s' stale data collection lock found during %s; it has been unlocked",
- rrdhost_hostname(parser->user.st->rrdhost),
- rrdset_id(parser->user.st),
- keyword);
- }
-
- if(unlikely(parser->user.v2.ml_locked)) {
- ml_chart_update_end(parser->user.st);
- parser->user.v2.ml_locked = false;
-
- if(stale)
-            netdata_log_error("PLUGINSD: 'host:%s/chart:%s' stale ML lock found during %s; it has been unlocked",
- rrdhost_hostname(parser->user.st->rrdhost),
- rrdset_id(parser->user.st),
- keyword);
- }
-}
-
-static inline void pluginsd_clear_scope_chart(PARSER *parser, const char *keyword) {
- pluginsd_unlock_previous_scope_chart(parser, keyword, true);
-
- if(parser->user.cleanup_slots && parser->user.st)
- rrdset_pluginsd_receive_unslot(parser->user.st);
-
- parser->user.st = NULL;
- parser->user.cleanup_slots = false;
-}
-
-static inline bool pluginsd_set_scope_chart(PARSER *parser, RRDSET *st, const char *keyword) {
- RRDSET *old_st = parser->user.st;
- pid_t old_collector_tid = (old_st) ? old_st->pluginsd.collector_tid : 0;
- pid_t my_collector_tid = gettid_cached();
-
- if(unlikely(old_collector_tid)) {
- if(old_collector_tid != my_collector_tid) {
- nd_log_limit_static_global_var(erl, 1, 0);
- nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING,
- "PLUGINSD: keyword %s: 'host:%s/chart:%s' is collected twice (my tid %d, other collector tid %d)",
- keyword ? keyword : "UNKNOWN",
- rrdhost_hostname(st->rrdhost), rrdset_id(st),
- my_collector_tid, old_collector_tid);
-
- return false;
- }
-
- old_st->pluginsd.collector_tid = 0;
- }
-
- st->pluginsd.collector_tid = my_collector_tid;
-
- pluginsd_clear_scope_chart(parser, keyword);
-
- st->pluginsd.pos = 0;
- parser->user.st = st;
- parser->user.cleanup_slots = false;
-
- return true;
-}
-
-static inline void pluginsd_rrddim_put_to_slot(PARSER *parser, RRDSET *st, RRDDIM *rd, ssize_t slot, bool obsolete) {
- size_t wanted_size = st->pluginsd.size;
-
- if(slot >= 1) {
- st->pluginsd.dims_with_slots = true;
- wanted_size = slot;
- }
- else {
- st->pluginsd.dims_with_slots = false;
- wanted_size = dictionary_entries(st->rrddim_root_index);
- }
-
- if(wanted_size > st->pluginsd.size) {
- st->pluginsd.prd_array = reallocz(st->pluginsd.prd_array, wanted_size * sizeof(struct pluginsd_rrddim));
-
- // initialize the empty slots
- for(ssize_t i = (ssize_t) wanted_size - 1; i >= (ssize_t) st->pluginsd.size; i--) {
- st->pluginsd.prd_array[i].rda = NULL;
- st->pluginsd.prd_array[i].rd = NULL;
- st->pluginsd.prd_array[i].id = NULL;
- }
-
- st->pluginsd.size = wanted_size;
- }
-
- if(st->pluginsd.dims_with_slots) {
- struct pluginsd_rrddim *prd = &st->pluginsd.prd_array[slot - 1];
-
- if(prd->rd != rd) {
- prd->rda = rrddim_find_and_acquire(st, string2str(rd->id));
- prd->rd = rrddim_acquired_to_rrddim(prd->rda);
- prd->id = string2str(prd->rd->id);
- }
-
- if(obsolete)
- parser->user.cleanup_slots = true;
- }
-}
-
-static inline RRDDIM *pluginsd_acquire_dimension(RRDHOST *host, RRDSET *st, const char *dimension, ssize_t slot, const char *cmd) {
- if (unlikely(!dimension || !*dimension)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, without a dimension.",
- rrdhost_hostname(host), rrdset_id(st), cmd);
- return NULL;
- }
-
- if (unlikely(!st->pluginsd.size)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s, but the chart has no dimensions.",
- rrdhost_hostname(host), rrdset_id(st), cmd);
- return NULL;
- }
-
- struct pluginsd_rrddim *prd;
- RRDDIM *rd;
-
- if(likely(st->pluginsd.dims_with_slots)) {
- // caching with slots
-
- if(unlikely(slot < 1 || slot > st->pluginsd.size)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s with slot %zd, but slots in the range [1 - %u] are expected.",
- rrdhost_hostname(host), rrdset_id(st), cmd, slot, st->pluginsd.size);
- return NULL;
- }
-
- prd = &st->pluginsd.prd_array[slot - 1];
-
- rd = prd->rd;
- if(likely(rd)) {
-#ifdef NETDATA_INTERNAL_CHECKS
- if(strcmp(prd->id, dimension) != 0) {
- ssize_t t;
- for(t = 0; t < st->pluginsd.size ;t++) {
- if (strcmp(st->pluginsd.prd_array[t].id, dimension) == 0)
- break;
- }
- if(t >= st->pluginsd.size)
- t = -1;
-
- internal_fatal(true,
- "PLUGINSD: expected to find dimension '%s' on slot %zd, but found '%s', "
- "the right slot is %zd",
- dimension, slot, prd->id, t);
- }
-#endif
- return rd;
- }
- }
- else {
- // caching without slots
-
- if(unlikely(st->pluginsd.pos >= st->pluginsd.size))
- st->pluginsd.pos = 0;
-
- prd = &st->pluginsd.prd_array[st->pluginsd.pos++];
-
- rd = prd->rd;
- if(likely(rd)) {
- const char *id = prd->id;
-
- if(strcmp(id, dimension) == 0) {
- // we found it cached
- return rd;
- }
- else {
- // the cached one is not good for us
- rrddim_acquired_release(prd->rda);
- prd->rda = NULL;
- prd->rd = NULL;
- prd->id = NULL;
- }
- }
- }
-
- // the dimension was not cached - find it and cache it in prd
-
- RRDDIM_ACQUIRED *rda = rrddim_find_and_acquire(st, dimension);
- if (unlikely(!rda)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s but dimension does not exist.",
- rrdhost_hostname(host), rrdset_id(st), dimension, cmd);
-
- return NULL;
- }
-
- prd->rda = rda;
- prd->rd = rd = rrddim_acquired_to_rrddim(rda);
- prd->id = string2str(rd->id);
-
- return rd;
-}
-
-static inline RRDSET *pluginsd_find_chart(RRDHOST *host, const char *chart, const char *cmd) {
- if (unlikely(!chart || !*chart)) {
- netdata_log_error("PLUGINSD: 'host:%s' got a %s without a chart id.",
- rrdhost_hostname(host), cmd);
- return NULL;
- }
-
- RRDSET *st = rrdset_find(host, chart);
- if (unlikely(!st))
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' got a %s but chart does not exist.",
- rrdhost_hostname(host), chart, cmd);
-
- return st;
-}
-
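-// checks whether the first word of the line is 'SLOT:<number>'; returns the slot
-// number when present, or -1 so that the caller does not advance its word index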
-static inline ssize_t pluginsd_parse_rrd_slot(char **words, size_t num_words) {
- ssize_t slot = -1;
- char *id = get_word(words, num_words, 1);
- if(id && id[0] == PLUGINSD_KEYWORD_SLOT[0] && id[1] == PLUGINSD_KEYWORD_SLOT[1] &&
- id[2] == PLUGINSD_KEYWORD_SLOT[2] && id[3] == PLUGINSD_KEYWORD_SLOT[3] && id[4] == ':') {
- slot = (ssize_t) str2ull_encoded(&id[5]);
- if(slot < 0) slot = 0; // so that the caller still skips the SLOT word (slot >= 0)
- }
-
- return slot;
-}
-
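-// caches the RRDSET pointer of a chart in the per-host receiver slot array,
-// growing the array under its spinlock when the requested slot exceeds its size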
-static inline void pluginsd_rrdset_cache_put_to_slot(PARSER *parser, RRDSET *st, ssize_t slot, bool obsolete) {
- // clean possible old cached data
- rrdset_pluginsd_receive_unslot(st);
-
- if(unlikely(slot < 1 || slot >= INT32_MAX))
- return;
-
- RRDHOST *host = st->rrdhost;
-
- if(unlikely((size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size)) {
- spinlock_lock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock);
- size_t old_slots = host->rrdpush.receive.pluginsd_chart_slots.size;
- size_t new_slots = (old_slots < PLUGINSD_MIN_RRDSET_POINTERS_CACHE) ? PLUGINSD_MIN_RRDSET_POINTERS_CACHE : old_slots * 2;
-
- if(new_slots < (size_t)slot)
- new_slots = slot;
-
- host->rrdpush.receive.pluginsd_chart_slots.array =
- reallocz(host->rrdpush.receive.pluginsd_chart_slots.array, new_slots * sizeof(RRDSET *));
-
- for(size_t i = old_slots; i < new_slots ;i++)
- host->rrdpush.receive.pluginsd_chart_slots.array[i] = NULL;
-
- host->rrdpush.receive.pluginsd_chart_slots.size = new_slots;
- spinlock_unlock(&host->rrdpush.receive.pluginsd_chart_slots.spinlock);
- }
-
- host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1] = st;
- st->pluginsd.last_slot = (int32_t)slot - 1;
- parser->user.cleanup_slots = obsolete;
-}
-
-static inline RRDSET *pluginsd_rrdset_cache_get_from_slot(PARSER *parser, RRDHOST *host, const char *id, ssize_t slot, const char *keyword) {
- if(unlikely(slot < 1 || (size_t)slot > host->rrdpush.receive.pluginsd_chart_slots.size))
- return pluginsd_find_chart(host, id, keyword);
-
- RRDSET *st = host->rrdpush.receive.pluginsd_chart_slots.array[slot - 1];
-
- if(!st) {
- st = pluginsd_find_chart(host, id, keyword);
- if(st)
- pluginsd_rrdset_cache_put_to_slot(parser, st, slot, rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE));
- }
- else {
- internal_fatal(string_strcmp(st->id, id) != 0,
- "PLUGINSD: wrong chart in slot %zd, expected '%s', found '%s'",
- slot - 1, id, string2str(st->id));
- }
-
- return st;
-}
-
-static inline SN_FLAGS pluginsd_parse_storage_number_flags(const char *flags_str) {
- SN_FLAGS flags = SN_FLAG_NONE;
-
- char c;
- while ((c = *flags_str++)) {
- switch (c) {
- case 'A':
- flags |= SN_FLAG_NOT_ANOMALOUS;
- break;
-
- case 'R':
- flags |= SN_FLAG_RESET;
- break;
-
- case 'E':
- flags = SN_EMPTY_SLOT;
- return flags;
-
- default:
- internal_error(true, "Unknown SN_FLAGS flag '%c'", c);
- break;
- }
- }
-
- return flags;
-}
-
-#endif //NETDATA_PLUGINSD_INTERNALS_H
diff --git a/src/collectors/plugins.d/pluginsd_parser.c b/src/collectors/plugins.d/pluginsd_parser.c
deleted file mode 100644
index d15ecbe94..000000000
--- a/src/collectors/plugins.d/pluginsd_parser.c
+++ /dev/null
@@ -1,1402 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "pluginsd_internals.h"
-
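-// ----------------------------------------------------------------------------
-// data collection API v1
-
-// SET assigns the collected value to one dimension of the chart currently in scope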
-static inline PARSER_RC pluginsd_set(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *dimension = get_word(words, num_words, idx++);
- char *value = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_SET);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_SET, PLUGINSD_KEYWORD_CHART);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_SET);
- if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- st->pluginsd.set = true;
-
- if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- netdata_log_debug(D_PLUGINSD, "PLUGINSD: 'host:%s/chart:%s/dim:%s' SET is setting value to '%s'",
- rrdhost_hostname(host), rrdset_id(st), dimension, value && *value ? value : "UNSET");
-
- if (value && *value)
- rrddim_set_by_pointer(st, rd, str2ll_encoded(value));
-
- return PARSER_RC_OK;
-}
-
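-// BEGIN selects the chart for the next collection cycle and advances its clock
-// by the optional microseconds elapsed since the previous collection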
-static inline PARSER_RC pluginsd_begin(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *id = get_word(words, num_words, idx++);
- char *microseconds_txt = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_BEGIN);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN))
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- usec_t microseconds = 0;
- if (microseconds_txt && *microseconds_txt) {
- long long t = str2ll(microseconds_txt, NULL);
- if(t >= 0)
- microseconds = t;
- }
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- if(st->replay.log_next_data_collection) {
- st->replay.log_next_data_collection = false;
-
- internal_error(true,
- "REPLAY: 'host:%s/chart:%s' first BEGIN after replication, last collected %llu, last updated %llu, microseconds %llu",
- rrdhost_hostname(host), rrdset_id(st),
- st->last_collected_time.tv_sec * USEC_PER_SEC + st->last_collected_time.tv_usec,
- st->last_updated.tv_sec * USEC_PER_SEC + st->last_updated.tv_usec,
- microseconds
- );
- }
-#endif
-
- if (likely(st->counter_done)) {
- if (likely(microseconds)) {
- if (parser->user.trust_durations)
- rrdset_next_usec_unfiltered(st, microseconds);
- else
- rrdset_next_usec(st, microseconds);
- }
- else
- rrdset_next(st);
- }
- return PARSER_RC_OK;
-}
-
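-// END completes the collection cycle started by BEGIN and commits the collected
-// values to the database (rrdset_timed_done)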
-static inline PARSER_RC pluginsd_end(char **words, size_t num_words, PARSER *parser) {
- char *tv_sec = get_word(words, num_words, 1);
- char *tv_usec = get_word(words, num_words, 2);
- char *pending_rrdset_next = get_word(words, num_words, 3);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_END);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_END, PLUGINSD_KEYWORD_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if (unlikely(rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- netdata_log_debug(D_PLUGINSD, "requested an END on chart '%s'", rrdset_id(st));
-
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_END);
- parser->user.data_collections_count++;
-
- struct timeval tv = {
- .tv_sec = (tv_sec && *tv_sec) ? str2ll(tv_sec, NULL) : 0,
- .tv_usec = (tv_usec && *tv_usec) ? str2ll(tv_usec, NULL) : 0
- };
-
- if(!tv.tv_sec)
- now_realtime_timeval(&tv);
-
- rrdset_timed_done(st, tv, pending_rrdset_next && *pending_rrdset_next ? true : false);
-
- return PARSER_RC_OK;
-}
-
-static void pluginsd_host_define_cleanup(PARSER *parser) {
- string_freez(parser->user.host_define.hostname);
- rrdlabels_destroy(parser->user.host_define.rrdlabels);
-
- parser->user.host_define.hostname = NULL;
- parser->user.host_define.rrdlabels = NULL;
- parser->user.host_define.parsing_host = false;
-}
-
-static inline bool pluginsd_validate_machine_guid(const char *guid, nd_uuid_t *uuid, char *output) {
- if(uuid_parse(guid, *uuid))
- return false;
-
- uuid_unparse_lower(*uuid, output);
-
- return true;
-}
-
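-// HOST_DEFINE starts the definition of a virtual host; HOST_LABEL adds labels to it
-// and HOST_DEFINE_END creates (or finds) the host and makes it the scope host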
-static inline PARSER_RC pluginsd_host_define(char **words, size_t num_words, PARSER *parser) {
- char *guid = get_word(words, num_words, 1);
- char *hostname = get_word(words, num_words, 2);
-
- if(unlikely(!guid || !*guid || !hostname || !*hostname))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, "missing parameters");
-
- if(unlikely(parser->user.host_define.parsing_host))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE,
- "another host definition is already open - did you send " PLUGINSD_KEYWORD_HOST_DEFINE_END "?");
-
- if(!pluginsd_validate_machine_guid(guid, &parser->user.host_define.machine_guid, parser->user.host_define.machine_guid_str))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE, "cannot parse MACHINE_GUID - is it a valid UUID?");
-
- parser->user.host_define.hostname = string_strdupz(hostname);
- parser->user.host_define.rrdlabels = rrdlabels_create();
- parser->user.host_define.parsing_host = true;
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_host_dictionary(char **words, size_t num_words, PARSER *parser, RRDLABELS *labels, const char *keyword) {
- char *name = get_word(words, num_words, 1);
- char *value = get_word(words, num_words, 2);
-
- if(!name || !*name || !value)
- return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "missing parameters");
-
- if(!parser->user.host_define.parsing_host || !labels)
- return PLUGINSD_DISABLE_PLUGIN(parser, keyword, "host is not defined, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this");
-
- rrdlabels_add(labels, name, value, RRDLABEL_SRC_CONFIG);
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_host_labels(char **words, size_t num_words, PARSER *parser) {
- return pluginsd_host_dictionary(words, num_words, parser,
- parser->user.host_define.rrdlabels,
- PLUGINSD_KEYWORD_HOST_LABEL);
-}
-
-static inline PARSER_RC pluginsd_host_define_end(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- if(!parser->user.host_define.parsing_host)
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST_DEFINE_END, "missing initialization, send " PLUGINSD_KEYWORD_HOST_DEFINE " before this");
-
- RRDHOST *host = rrdhost_find_or_create(
- string2str(parser->user.host_define.hostname),
- string2str(parser->user.host_define.hostname),
- parser->user.host_define.machine_guid_str,
- "Netdata Virtual Host 1.0",
- netdata_configured_timezone,
- netdata_configured_abbrev_timezone,
- netdata_configured_utc_offset,
- program_name,
- NETDATA_VERSION,
- default_rrd_update_every,
- default_rrd_history_entries,
- default_rrd_memory_mode,
- health_plugin_enabled(),
- default_rrdpush_enabled,
- default_rrdpush_destination,
- default_rrdpush_api_key,
- default_rrdpush_send_charts_matching,
- default_rrdpush_enable_replication,
- default_rrdpush_seconds_to_replicate,
- default_rrdpush_replication_step,
- rrdhost_labels_to_system_info(parser->user.host_define.rrdlabels),
- false);
-
- rrdhost_option_set(host, RRDHOST_OPTION_VIRTUAL_HOST);
- dyncfg_host_init(host);
-
- if(host->rrdlabels) {
- rrdlabels_migrate_to_these(host->rrdlabels, parser->user.host_define.rrdlabels);
- }
- else {
- host->rrdlabels = parser->user.host_define.rrdlabels;
- parser->user.host_define.rrdlabels = NULL;
- }
-
- pluginsd_host_define_cleanup(parser);
-
- parser->user.host = host;
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_HOST_DEFINE_END);
-
- rrdhost_flag_clear(host, RRDHOST_FLAG_ORPHAN);
- rrdcontext_host_child_connected(host);
- schedule_node_info_update(host);
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_host(char **words, size_t num_words, PARSER *parser) {
- char *guid = get_word(words, num_words, 1);
-
- if(!guid || !*guid || strcmp(guid, "localhost") == 0) {
- parser->user.host = localhost;
- return PARSER_RC_OK;
- }
-
- nd_uuid_t uuid;
- char uuid_str[UUID_STR_LEN];
- if(!pluginsd_validate_machine_guid(guid, &uuid, uuid_str))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST, "cannot parse MACHINE_GUID - is it a valid UUID?");
-
- RRDHOST *host = rrdhost_find_by_guid(uuid_str);
- if(unlikely(!host))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_HOST, "cannot find a host with this machine guid - have you created it?");
-
- parser->user.host = host;
-
- return PARSER_RC_OK;
-}
-
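-// CHART creates (or updates) a chart from its definition line and makes it the
-// chart in scope for the DIMENSION and SET commands that follow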
-static inline PARSER_RC pluginsd_chart(char **words, size_t num_words, PARSER *parser) {
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CHART);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *type = get_word(words, num_words, idx++);
- char *name = get_word(words, num_words, idx++);
- char *title = get_word(words, num_words, idx++);
- char *units = get_word(words, num_words, idx++);
- char *family = get_word(words, num_words, idx++);
- char *context = get_word(words, num_words, idx++);
- char *chart = get_word(words, num_words, idx++);
- char *priority_s = get_word(words, num_words, idx++);
- char *update_every_s = get_word(words, num_words, idx++);
- char *options = get_word(words, num_words, idx++);
- char *plugin = get_word(words, num_words, idx++);
- char *module = get_word(words, num_words, idx++);
-
- // parse the id from type
- char *id = NULL;
- if (likely(type && (id = strchr(type, '.')))) {
- *id = '\0';
- id++;
- }
-
- // make sure we have the required variables
- if (unlikely((!type || !*type || !id || !*id)))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_CHART, "missing parameters");
-
- // parse the name, and make sure it does not include 'type.'
- if (unlikely(name && *name)) {
- // when data are streamed from child nodes,
- // the name arrives as 'type.name',
- // so we have to strip the 'type.' prefix from it too
- size_t len = strlen(type);
- if (strncmp(type, name, len) == 0 && name[len] == '.')
- name = &name[len + 1];
-
- // if the name is the same as the id,
- // or is just 'NULL', clear it.
- if (unlikely(strcmp(name, id) == 0 || strcasecmp(name, "NULL") == 0 || strcasecmp(name, "(NULL)") == 0))
- name = NULL;
- }
-
- int priority = 1000;
- if (likely(priority_s && *priority_s))
- priority = str2i(priority_s);
-
- int update_every = parser->user.cd->update_every;
- if (likely(update_every_s && *update_every_s))
- update_every = str2i(update_every_s);
- if (unlikely(!update_every))
- update_every = parser->user.cd->update_every;
-
- RRDSET_TYPE chart_type = RRDSET_TYPE_LINE;
- if (unlikely(chart))
- chart_type = rrdset_type_id(chart);
-
- if (unlikely(name && !*name))
- name = NULL;
- if (unlikely(family && !*family))
- family = NULL;
- if (unlikely(context && !*context))
- context = NULL;
- if (unlikely(!title))
- title = "";
- if (unlikely(!units))
- units = "unknown";
-
- netdata_log_debug(
- D_PLUGINSD,
- "creating chart type='%s', id='%s', name='%s', family='%s', context='%s', chart='%s', priority=%d, update_every=%d",
- type, id, name ? name : "", family ? family : "", context ? context : "", rrdset_type_name(chart_type),
- priority, update_every);
-
- RRDSET *st = NULL;
-
- st = rrdset_create(
- host, type, id, name, family, context, title, units,
- (plugin && *plugin) ? plugin : parser->user.cd->filename,
- module, priority, update_every,
- chart_type);
-
- bool obsolete = false;
- if (likely(st)) {
- if (options && *options) {
- if (strstr(options, "obsolete")) {
- rrdset_is_obsolete___safe_from_collector_thread(st);
- obsolete = true;
- }
- else
- rrdset_isnot_obsolete___safe_from_collector_thread(st);
-
- if (strstr(options, "detail"))
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
- else
- rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
-
- if (strstr(options, "hidden"))
- rrdset_flag_set(st, RRDSET_FLAG_HIDDEN);
- else
- rrdset_flag_clear(st, RRDSET_FLAG_HIDDEN);
-
- if (strstr(options, "store_first"))
- rrdset_flag_set(st, RRDSET_FLAG_STORE_FIRST);
- else
- rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
- }
- else {
- rrdset_isnot_obsolete___safe_from_collector_thread(st);
- rrdset_flag_clear(st, RRDSET_FLAG_DETAIL);
- rrdset_flag_clear(st, RRDSET_FLAG_STORE_FIRST);
- }
-
- if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_CHART))
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- pluginsd_rrdset_cache_put_to_slot(parser, st, slot, obsolete);
- }
- else
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_CHART);
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_chart_definition_end(char **words, size_t num_words, PARSER *parser) {
- const char *first_entry_txt = get_word(words, num_words, 1);
- const char *last_entry_txt = get_word(words, num_words, 2);
- const char *wall_clock_time_txt = get_word(words, num_words, 3);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CHART_DEFINITION_END);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_CHART_DEFINITION_END, PLUGINSD_KEYWORD_CHART);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- time_t first_entry_child = (first_entry_txt && *first_entry_txt) ? (time_t)str2ul(first_entry_txt) : 0;
- time_t last_entry_child = (last_entry_txt && *last_entry_txt) ? (time_t)str2ul(last_entry_txt) : 0;
- time_t child_wall_clock_time = (wall_clock_time_txt && *wall_clock_time_txt) ? (time_t)str2ul(wall_clock_time_txt) : now_realtime_sec();
-
- bool ok = true;
- if(!rrdset_flag_check(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS)) {
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- st->replay.start_streaming = false;
- st->replay.after = 0;
- st->replay.before = 0;
-#endif
-
- rrdset_flag_set(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS);
- rrdset_flag_clear(st, RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED);
- rrdhost_receiver_replicating_charts_plus_one(st->rrdhost);
-
- ok = replicate_chart_request(send_to_plugin, parser, host, st,
- first_entry_child, last_entry_child, child_wall_clock_time,
- 0, 0);
- }
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- else {
- internal_error(true, "REPLAY: 'host:%s/chart:%s' not sending duplicate replication request",
- rrdhost_hostname(st->rrdhost), rrdset_id(st));
- }
-#endif
-
- return ok ? PARSER_RC_OK : PARSER_RC_ERROR;
-}
-
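-// DIMENSION adds a dimension to the chart in scope, parsing its algorithm,
-// multiplier, divisor and options (obsolete, hidden, noreset, nooverflow)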
-static inline PARSER_RC pluginsd_dimension(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *id = get_word(words, num_words, idx++);
- char *name = get_word(words, num_words, idx++);
- char *algorithm = get_word(words, num_words, idx++);
- char *multiplier_s = get_word(words, num_words, idx++);
- char *divisor_s = get_word(words, num_words, idx++);
- char *options = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_DIMENSION);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_DIMENSION, PLUGINSD_KEYWORD_CHART);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if (unlikely(!id || !*id))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DIMENSION, "missing dimension id");
-
- long multiplier = 1;
- if (multiplier_s && *multiplier_s) {
- multiplier = str2ll_encoded(multiplier_s);
- if (unlikely(!multiplier))
- multiplier = 1;
- }
-
- long divisor = 1;
- if (likely(divisor_s && *divisor_s)) {
- divisor = str2ll_encoded(divisor_s);
- if (unlikely(!divisor))
- divisor = 1;
- }
-
- if (unlikely(!algorithm || !*algorithm))
- algorithm = "absolute";
-
- if (unlikely(st && rrdset_flag_check(st, RRDSET_FLAG_DEBUG)))
- netdata_log_debug(
- D_PLUGINSD,
- "creating dimension in chart %s, id='%s', name='%s', algorithm='%s', multiplier=%ld, divisor=%ld, hidden='%s'",
- rrdset_id(st), id, name ? name : "", rrd_algorithm_name(rrd_algorithm_id(algorithm)), multiplier, divisor,
- options ? options : "");
-
- RRDDIM *rd = rrddim_add(st, id, name, multiplier, divisor, rrd_algorithm_id(algorithm));
- if (unlikely(!rd))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_DIMENSION, "failed to create dimension");
-
- int unhide_dimension = 1;
-
- rrddim_option_clear(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS);
- bool obsolete = false;
- if (options && *options) {
- if (strstr(options, "obsolete") != NULL) {
- obsolete = true;
- rrddim_is_obsolete___safe_from_collector_thread(st, rd);
- }
- else
- rrddim_isnot_obsolete___safe_from_collector_thread(st, rd);
-
- unhide_dimension = !strstr(options, "hidden");
-
- if (strstr(options, "noreset") != NULL)
- rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS);
- if (strstr(options, "nooverflow") != NULL)
- rrddim_option_set(rd, RRDDIM_OPTION_DONT_DETECT_RESETS_OR_OVERFLOWS);
- }
- else
- rrddim_isnot_obsolete___safe_from_collector_thread(st, rd);
-
- bool should_update_dimension = false;
-
- if (likely(unhide_dimension)) {
- rrddim_option_clear(rd, RRDDIM_OPTION_HIDDEN);
- should_update_dimension = rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN);
- }
- else {
- rrddim_option_set(rd, RRDDIM_OPTION_HIDDEN);
- should_update_dimension = !rrddim_flag_check(rd, RRDDIM_FLAG_META_HIDDEN);
- }
-
- if (should_update_dimension) {
- rrddim_flag_set(rd, RRDDIM_FLAG_METADATA_UPDATE);
- rrdhost_flag_set(rd->rrdset->rrdhost, RRDHOST_FLAG_METADATA_UPDATE);
- }
-
- pluginsd_rrddim_put_to_slot(parser, st, rd, slot, obsolete);
-
- return PARSER_RC_OK;
-}
-
-// ----------------------------------------------------------------------------
-
-static inline PARSER_RC pluginsd_variable(char **words, size_t num_words, PARSER *parser) {
- char *name = get_word(words, num_words, 1);
- char *value = get_word(words, num_words, 2);
- NETDATA_DOUBLE v;
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_VARIABLE);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_get_scope_chart(parser);
-
- int global = (st) ? 0 : 1;
-
- if (name && *name) {
- if ((strcmp(name, "GLOBAL") == 0 || strcmp(name, "HOST") == 0)) {
- global = 1;
- name = get_word(words, num_words, 2);
- value = get_word(words, num_words, 3);
- } else if ((strcmp(name, "LOCAL") == 0 || strcmp(name, "CHART") == 0)) {
- global = 0;
- name = get_word(words, num_words, 2);
- value = get_word(words, num_words, 3);
- }
- }
-
- if (unlikely(!name || !*name))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_VARIABLE, "missing variable name");
-
- if (unlikely(!value || !*value))
- value = NULL;
-
- if (unlikely(!value)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot set %s VARIABLE '%s' to an empty value",
- rrdhost_hostname(host),
- st ? rrdset_id(st):"UNSET",
- (global) ? "HOST" : "CHART",
- name);
- return PARSER_RC_OK;
- }
-
- if (!global && !st)
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_VARIABLE, "no chart is defined and no GLOBAL is given");
-
- char *endptr = NULL;
- v = (NETDATA_DOUBLE) str2ndd_encoded(value, &endptr);
- if (unlikely(endptr && *endptr)) {
- if (endptr == value)
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' cannot be parsed as a number",
- rrdhost_hostname(host),
- st ? rrdset_id(st):"UNSET",
- value,
- name);
- else
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' the value '%s' of VARIABLE '%s' has leftovers: '%s'",
- rrdhost_hostname(host),
- st ? rrdset_id(st):"UNSET",
- value,
- name,
- endptr);
- }
-
- if (global) {
- const RRDVAR_ACQUIRED *rva = rrdvar_host_variable_add_and_acquire(host, name);
- if (rva) {
- rrdvar_host_variable_set(host, rva, v);
- rrdvar_host_variable_release(host, rva);
- }
- else
- netdata_log_error("PLUGINSD: 'host:%s' cannot find/create HOST VARIABLE '%s'",
- rrdhost_hostname(host),
- name);
- } else {
- const RRDVAR_ACQUIRED *rsa = rrdvar_chart_variable_add_and_acquire(st, name);
- if (rsa) {
- rrdvar_chart_variable_set(st, rsa, v);
- rrdvar_chart_variable_release(st, rsa);
- }
- else
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s' cannot find/create CHART VARIABLE '%s'",
- rrdhost_hostname(host), rrdset_id(st), name);
- }
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_flush(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- netdata_log_debug(D_PLUGINSD, "requested a " PLUGINSD_KEYWORD_FLUSH);
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_FLUSH);
- parser->user.replay.start_time = 0;
- parser->user.replay.end_time = 0;
- parser->user.replay.start_time_ut = 0;
- parser->user.replay.end_time_ut = 0;
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_disable(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- netdata_log_info("PLUGINSD: plugin called DISABLE. Disabling it.");
- parser->user.enabled = 0;
- return PARSER_RC_STOP;
-}
-
-static inline PARSER_RC pluginsd_label(char **words, size_t num_words, PARSER *parser) {
- const char *name = get_word(words, num_words, 1);
- const char *label_source = get_word(words, num_words, 2);
- const char *value = get_word(words, num_words, 3);
-
- if (!name || !label_source || !value)
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_LABEL, "missing parameters");
-
- char *store = (char *)value;
- bool allocated_store = false;
-
- if(unlikely(num_words > 4)) {
- allocated_store = true;
- store = mallocz(PLUGINSD_LINE_MAX + 1);
- size_t remaining = PLUGINSD_LINE_MAX;
- char *move = store;
- char *word;
- for(size_t i = 3; i < num_words && remaining > 2 && (word = get_word(words, num_words, i)) ;i++) {
- if(i > 3) {
- *move++ = ' ';
- *move = '\0';
- remaining--;
- }
-
- size_t length = strlen(word);
- if (length > remaining)
- length = remaining;
-
- remaining -= length;
- memcpy(move, word, length);
- move += length;
- *move = '\0';
- }
- }
-
- if(unlikely(!(parser->user.new_host_labels)))
- parser->user.new_host_labels = rrdlabels_create();
-
- if (strcmp(name,HOST_LABEL_IS_EPHEMERAL) == 0) {
- int is_ephemeral = appconfig_test_boolean_value((char *) value);
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_LABEL);
- if (host) {
- if (is_ephemeral)
- rrdhost_option_set(host, RRDHOST_OPTION_EPHEMERAL_HOST);
- else
- rrdhost_option_clear(host, RRDHOST_OPTION_EPHEMERAL_HOST);
- }
- }
-
- rrdlabels_add(parser->user.new_host_labels, name, store, str2l(label_source));
-
- if (allocated_store)
- freez(store);
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_overwrite(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_OVERWRITE);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- netdata_log_debug(D_PLUGINSD, "requested to OVERWRITE host labels");
-
- if(unlikely(!host->rrdlabels))
- host->rrdlabels = rrdlabels_create();
-
- rrdlabels_migrate_to_these(host->rrdlabels, parser->user.new_host_labels);
- if (rrdhost_option_check(host, RRDHOST_OPTION_EPHEMERAL_HOST))
- rrdlabels_add(host->rrdlabels, HOST_LABEL_IS_EPHEMERAL, "true", RRDLABEL_SRC_CONFIG);
-
- if(!rrdlabels_exist(host->rrdlabels, "_os"))
- rrdlabels_add(host->rrdlabels, "_os", string2str(host->os), RRDLABEL_SRC_AUTO);
-
- if(!rrdlabels_exist(host->rrdlabels, "_hostname"))
- rrdlabels_add(host->rrdlabels, "_hostname", string2str(host->hostname), RRDLABEL_SRC_AUTO);
-
- rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_LABELS | RRDHOST_FLAG_METADATA_UPDATE);
-
- rrdlabels_destroy(parser->user.new_host_labels);
- parser->user.new_host_labels = NULL;
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_clabel(char **words, size_t num_words, PARSER *parser) {
- const char *name = get_word(words, num_words, 1);
- const char *value = get_word(words, num_words, 2);
- const char *label_source = get_word(words, num_words, 3);
-
- if (!name || !value || !label_source) {
- netdata_log_error("Ignoring malformed or empty CHART LABEL command.");
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
- }
-
- if(unlikely(!parser->user.chart_rrdlabels_linked_temporarily)) {
- RRDSET *st = pluginsd_get_scope_chart(parser);
- if(unlikely(!st)) {
- netdata_log_error("Ignoring CLABEL command without a chart in scope.");
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
- }
- parser->user.chart_rrdlabels_linked_temporarily = st->rrdlabels;
- rrdlabels_unmark_all(parser->user.chart_rrdlabels_linked_temporarily);
- }
-
- rrdlabels_add(parser->user.chart_rrdlabels_linked_temporarily, name, value, str2l(label_source));
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_clabel_commit(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_CLABEL_COMMIT);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_CLABEL_COMMIT, PLUGINSD_KEYWORD_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- netdata_log_debug(D_PLUGINSD, "requested to commit chart labels");
-
- if(!parser->user.chart_rrdlabels_linked_temporarily) {
- netdata_log_error("PLUGINSD: 'host:%s' got CLABEL_COMMIT, without a CHART or BEGIN. Ignoring it.", rrdhost_hostname(host));
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
- }
-
- rrdlabels_remove_all_unmarked(parser->user.chart_rrdlabels_linked_temporarily);
-
- rrdset_flag_set(st, RRDSET_FLAG_METADATA_UPDATE);
- rrdhost_flag_set(st->rrdhost, RRDHOST_FLAG_METADATA_UPDATE);
- rrdset_metadata_updated(st);
-
- parser->user.chart_rrdlabels_linked_temporarily = NULL;
- return PARSER_RC_OK;
-}
-
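-// ----------------------------------------------------------------------------
-// data collection API v2 (BEGIN2 / SET2 / END2)
-
-// BEGIN2 selects the chart, locks data collection and ML for it, and forwards the
-// keyword to the next agent when a sender stream buffer is available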
-static inline PARSER_RC pluginsd_begin_v2(char **words, size_t num_words, PARSER *parser) {
- timing_init();
-
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *id = get_word(words, num_words, idx++);
- char *update_every_str = get_word(words, num_words, idx++);
- char *end_time_str = get_word(words, num_words, idx++);
- char *wall_clock_time_str = get_word(words, num_words, idx++);
-
- if(unlikely(!id || !update_every_str || !end_time_str || !wall_clock_time_str))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_BEGIN_V2, "missing parameters");
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_BEGIN_V2);
- if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- timing_step(TIMING_STEP_BEGIN2_PREPARE);
-
- RRDSET *st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_BEGIN_V2);
-
- if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_BEGIN_V2))
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(unlikely(rrdset_flag_check(st, RRDSET_FLAG_OBSOLETE)))
- rrdset_isnot_obsolete___safe_from_collector_thread(st);
-
- timing_step(TIMING_STEP_BEGIN2_FIND_CHART);
-
- // ------------------------------------------------------------------------
- // parse the parameters
-
- time_t update_every = (time_t) str2ull_encoded(update_every_str);
- time_t end_time = (time_t) str2ull_encoded(end_time_str);
-
- time_t wall_clock_time;
- if(likely(*wall_clock_time_str == '#'))
- wall_clock_time = end_time;
- else
- wall_clock_time = (time_t) str2ull_encoded(wall_clock_time_str);
-
- if (unlikely(update_every != st->update_every))
- rrdset_set_update_every_s(st, update_every);
-
- timing_step(TIMING_STEP_BEGIN2_PARSE);
-
- // ------------------------------------------------------------------------
- // prepare our state
-
- pluginsd_lock_rrdset_data_collection(parser);
-
- parser->user.v2.update_every = update_every;
- parser->user.v2.end_time = end_time;
- parser->user.v2.wall_clock_time = wall_clock_time;
- parser->user.v2.ml_locked = ml_chart_update_begin(st);
-
- timing_step(TIMING_STEP_BEGIN2_ML);
-
- // ------------------------------------------------------------------------
- // propagate it forward in v2
-
- if(!parser->user.v2.stream_buffer.wb && rrdhost_has_rrdpush_sender_enabled(st->rrdhost))
- parser->user.v2.stream_buffer = rrdset_push_metric_initialize(parser->user.st, wall_clock_time);
-
- if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.wb) {
- // check if receiver and sender have the same number parsing capabilities
- bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754);
-
- // check sender capabilities
- bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false;
- NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX;
-
- BUFFER *wb = parser->user.v2.stream_buffer.wb;
-
- buffer_need_bytes(wb, 1024);
-
- if(unlikely(parser->user.v2.stream_buffer.begin_v2_added))
- buffer_fast_strcat(wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1);
-
- buffer_fast_strcat(wb, PLUGINSD_KEYWORD_BEGIN_V2, sizeof(PLUGINSD_KEYWORD_BEGIN_V2) - 1);
-
- if(with_slots) {
- buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2);
- buffer_print_uint64_encoded(wb, integer_encoding, st->rrdpush.sender.chart_slot);
- }
-
- buffer_fast_strcat(wb, " '", 2);
- buffer_fast_strcat(wb, rrdset_id(st), string_strlen(st->id));
- buffer_fast_strcat(wb, "' ", 2);
-
- if(can_copy)
- buffer_strcat(wb, update_every_str);
- else
- buffer_print_uint64_encoded(wb, integer_encoding, update_every);
-
- buffer_fast_strcat(wb, " ", 1);
-
- if(can_copy)
- buffer_strcat(wb, end_time_str);
- else
- buffer_print_uint64_encoded(wb, integer_encoding, end_time);
-
- buffer_fast_strcat(wb, " ", 1);
-
- if(can_copy)
- buffer_strcat(wb, wall_clock_time_str);
- else
- buffer_print_uint64_encoded(wb, integer_encoding, wall_clock_time);
-
- buffer_fast_strcat(wb, "\n", 1);
-
- parser->user.v2.stream_buffer.last_point_end_time_s = end_time;
- parser->user.v2.stream_buffer.begin_v2_added = true;
- }
-
- timing_step(TIMING_STEP_BEGIN2_PROPAGATE);
-
- // ------------------------------------------------------------------------
- // store it
-
- st->last_collected_time.tv_sec = end_time;
- st->last_collected_time.tv_usec = 0;
- st->last_updated.tv_sec = end_time;
- st->last_updated.tv_usec = 0;
- st->counter++;
- st->counter_done++;
-
- // these are only needed for db modes RAM and ALLOC
- st->db.current_entry++;
- if(st->db.current_entry >= st->db.entries)
- st->db.current_entry -= st->db.entries;
-
- timing_step(TIMING_STEP_BEGIN2_STORE);
-
- return PARSER_RC_OK;
-}
-
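-// SET2 stores one sample: it parses the collected and computed values and the
-// storage-number flags, runs ML anomaly detection, forwards the sample downstream
-// and stores it to the database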
-static inline PARSER_RC pluginsd_set_v2(char **words, size_t num_words, PARSER *parser) {
- timing_init();
-
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *dimension = get_word(words, num_words, idx++);
- char *collected_str = get_word(words, num_words, idx++);
- char *value_str = get_word(words, num_words, idx++);
- char *flags_str = get_word(words, num_words, idx++);
-
- if(unlikely(!dimension || !collected_str || !value_str || !flags_str))
- return PLUGINSD_DISABLE_PLUGIN(parser, PLUGINSD_KEYWORD_SET_V2, "missing parameters");
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_SET_V2);
- if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_SET_V2, PLUGINSD_KEYWORD_BEGIN_V2);
- if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- timing_step(TIMING_STEP_SET2_PREPARE);
-
- RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_SET_V2);
- if(unlikely(!rd)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- st->pluginsd.set = true;
-
- if(unlikely(rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE | RRDDIM_FLAG_ARCHIVED)))
- rrddim_isnot_obsolete___safe_from_collector_thread(st, rd);
-
- timing_step(TIMING_STEP_SET2_LOOKUP_DIMENSION);
-
- // ------------------------------------------------------------------------
- // parse the parameters
-
- collected_number collected_value = (collected_number) str2ll_encoded(collected_str);
-
- NETDATA_DOUBLE value;
- if(*value_str == '#')
- value = (NETDATA_DOUBLE)collected_value;
- else
- value = str2ndd_encoded(value_str, NULL);
-
- SN_FLAGS flags = pluginsd_parse_storage_number_flags(flags_str);
-
- timing_step(TIMING_STEP_SET2_PARSE);
-
- // ------------------------------------------------------------------------
- // check value and ML
-
- if (unlikely(!netdata_double_isnumber(value) || (flags == SN_EMPTY_SLOT))) {
- value = NAN;
- flags = SN_EMPTY_SLOT;
-
- if(parser->user.v2.ml_locked)
- ml_dimension_is_anomalous(rd, parser->user.v2.end_time, 0, false);
- }
- else if(parser->user.v2.ml_locked) {
- if (ml_dimension_is_anomalous(rd, parser->user.v2.end_time, value, true)) {
- // clear anomaly bit: 0 -> is anomalous, 1 -> not anomalous
- flags &= ~((storage_number) SN_FLAG_NOT_ANOMALOUS);
- }
- else
- flags |= SN_FLAG_NOT_ANOMALOUS;
- }
-
- timing_step(TIMING_STEP_SET2_ML);
-
- // ------------------------------------------------------------------------
- // propagate it forward in v2
-
- if(parser->user.v2.stream_buffer.v2 && parser->user.v2.stream_buffer.begin_v2_added && parser->user.v2.stream_buffer.wb) {
- // check if receiver and sender have the same number parsing capabilities
- bool can_copy = stream_has_capability(&parser->user, STREAM_CAP_IEEE754) == stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754);
-
- // check the sender capabilities
- bool with_slots = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_SLOTS) ? true : false;
- NUMBER_ENCODING integer_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_HEX;
- NUMBER_ENCODING doubles_encoding = stream_has_capability(&parser->user.v2.stream_buffer, STREAM_CAP_IEEE754) ? NUMBER_ENCODING_BASE64 : NUMBER_ENCODING_DECIMAL;
-
- BUFFER *wb = parser->user.v2.stream_buffer.wb;
- buffer_need_bytes(wb, 1024);
- buffer_fast_strcat(wb, PLUGINSD_KEYWORD_SET_V2, sizeof(PLUGINSD_KEYWORD_SET_V2) - 1);
-
- if(with_slots) {
- buffer_fast_strcat(wb, " "PLUGINSD_KEYWORD_SLOT":", sizeof(PLUGINSD_KEYWORD_SLOT) - 1 + 2);
- buffer_print_uint64_encoded(wb, integer_encoding, rd->rrdpush.sender.dim_slot);
- }
-
- buffer_fast_strcat(wb, " '", 2);
- buffer_fast_strcat(wb, rrddim_id(rd), string_strlen(rd->id));
- buffer_fast_strcat(wb, "' ", 2);
- if(can_copy)
- buffer_strcat(wb, collected_str);
- else
- buffer_print_int64_encoded(wb, integer_encoding, collected_value); // original v2 had hex
- buffer_fast_strcat(wb, " ", 1);
- if(can_copy)
- buffer_strcat(wb, value_str);
- else
- buffer_print_netdata_double_encoded(wb, doubles_encoding, value); // original v2 had decimal
- buffer_fast_strcat(wb, " ", 1);
- buffer_print_sn_flags(wb, flags, true);
- buffer_fast_strcat(wb, "\n", 1);
- }
-
- timing_step(TIMING_STEP_SET2_PROPAGATE);
-
- // ------------------------------------------------------------------------
- // store it
-
- rrddim_store_metric(rd, parser->user.v2.end_time * USEC_PER_SEC, value, flags);
- rd->collector.last_collected_time.tv_sec = parser->user.v2.end_time;
- rd->collector.last_collected_time.tv_usec = 0;
- rd->collector.last_collected_value = collected_value;
- rd->collector.last_stored_value = value;
- rd->collector.last_calculated_value = value;
- rd->collector.counter++;
- rrddim_set_updated(rd);
-
- timing_step(TIMING_STEP_SET2_STORE);
-
- return PARSER_RC_OK;
-}
-
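-// END2 completes the v2 collection cycle: it downgrades the update to the v1
-// protocol for older receivers, unlocks data collection and ML, finishes the
-// stream buffer and resets the per-dimension collection state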
-static inline PARSER_RC pluginsd_end_v2(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser) {
- timing_init();
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_END_V2);
- if(unlikely(!host)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_END_V2, PLUGINSD_KEYWORD_BEGIN_V2);
- if(unlikely(!st)) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- parser->user.data_collections_count++;
-
- timing_step(TIMING_STEP_END2_PREPARE);
-
- // ------------------------------------------------------------------------
- // propagate the whole chart update in v1
-
- if(unlikely(!parser->user.v2.stream_buffer.v2 && !parser->user.v2.stream_buffer.begin_v2_added && parser->user.v2.stream_buffer.wb))
- rrdset_push_metrics_v1(&parser->user.v2.stream_buffer, st);
-
- timing_step(TIMING_STEP_END2_PUSH_V1);
-
- // ------------------------------------------------------------------------
- // unblock data collection
-
- pluginsd_unlock_previous_scope_chart(parser, PLUGINSD_KEYWORD_END_V2, false);
- rrdcontext_collected_rrdset(st);
- store_metric_collection_completed();
-
- timing_step(TIMING_STEP_END2_RRDSET);
-
- // ------------------------------------------------------------------------
- // propagate it forward
-
- rrdset_push_metrics_finished(&parser->user.v2.stream_buffer, st);
-
- timing_step(TIMING_STEP_END2_PROPAGATE);
-
- // ------------------------------------------------------------------------
- // cleanup RRDSET / RRDDIM
-
- if(likely(st->pluginsd.dims_with_slots)) {
- for(size_t i = 0; i < st->pluginsd.size ;i++) {
- RRDDIM *rd = st->pluginsd.prd_array[i].rd;
-
- if(!rd)
- continue;
-
- rd->collector.calculated_value = 0;
- rd->collector.collected_value = 0;
- rrddim_clear_updated(rd);
- }
- }
- else {
- RRDDIM *rd;
- rrddim_foreach_read(rd, st){
- rd->collector.calculated_value = 0;
- rd->collector.collected_value = 0;
- rrddim_clear_updated(rd);
- }
- rrddim_foreach_done(rd);
- }
-
- // ------------------------------------------------------------------------
- // reset state
-
- parser->user.v2 = (struct parser_user_object_v2){ 0 };
-
- timing_step(TIMING_STEP_END2_STORE);
- timing_report();
-
- return PARSER_RC_OK;
-}
-
-static inline PARSER_RC pluginsd_exit(char **words __maybe_unused, size_t num_words __maybe_unused, PARSER *parser __maybe_unused) {
- netdata_log_info("PLUGINSD: plugin called EXIT.");
- return PARSER_RC_STOP;
-}
-
-static inline PARSER_RC streaming_claimed_id(char **words, size_t num_words, PARSER *parser)
-{
- const char *host_uuid_str = get_word(words, num_words, 1);
- const char *claim_id_str = get_word(words, num_words, 2);
-
- if (!host_uuid_str || !claim_id_str) {
- netdata_log_error("Command CLAIMED_ID came malformed, uuid = '%s', claim_id = '%s'",
- host_uuid_str ? host_uuid_str : "[unset]",
- claim_id_str ? claim_id_str : "[unset]");
- return PARSER_RC_ERROR;
- }
-
- nd_uuid_t uuid;
- RRDHOST *host = parser->user.host;
-
- // we do not need the parsed UUID itself,
- // we parse it only to validate its format
- if(uuid_parse(host_uuid_str, uuid)) {
- netdata_log_error("1st parameter (host GUID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", host_uuid_str);
- return PARSER_RC_ERROR;
- }
- if(uuid_parse(claim_id_str, uuid) && strcmp(claim_id_str, "NULL") != 0) {
- netdata_log_error("2nd parameter (Claim ID) to CLAIMED_ID command is not valid GUID. Received: \"%s\".", claim_id_str);
- return PARSER_RC_ERROR;
- }
-
- if(strcmp(host_uuid_str, host->machine_guid) != 0) {
- netdata_log_error("Claim ID is for host \"%s\" but it came over connection for \"%s\"", host_uuid_str, host->machine_guid);
- return PARSER_RC_OK; // the message is OK, the problem must be somewhere else
- }
-
- rrdhost_aclk_state_lock(host);
-
- if (host->aclk_state.claimed_id)
- freez(host->aclk_state.claimed_id);
-
- host->aclk_state.claimed_id = strcmp(claim_id_str, "NULL") ? strdupz(claim_id_str) : NULL;
-
- rrdhost_aclk_state_unlock(host);
-
- rrdhost_flag_set(host, RRDHOST_FLAG_METADATA_CLAIMID |RRDHOST_FLAG_METADATA_UPDATE);
-
- rrdpush_send_claimed_id(host);
-
- return PARSER_RC_OK;
-}
-
-// ----------------------------------------------------------------------------
-
-void pluginsd_cleanup_v2(PARSER *parser) {
- // this is called when the thread is stopped while processing
- pluginsd_clear_scope_chart(parser, "THREAD CLEANUP");
-}
-
-void pluginsd_process_thread_cleanup(void *pptr) {
- PARSER *parser = CLEANUP_FUNCTION_GET_PTR(pptr);
- if(!parser) return;
-
- pluginsd_cleanup_v2(parser);
- pluginsd_host_define_cleanup(parser);
-
- rrd_collector_finished();
-
-#ifdef NETDATA_LOG_STREAM_RECEIVE
- if(parser->user.stream_log_fp) {
- fclose(parser->user.stream_log_fp);
- parser->user.stream_log_fp = NULL;
- }
-#endif
-
- parser_destroy(parser);
-}
-
-bool parser_reconstruct_node(BUFFER *wb, void *ptr) {
- PARSER *parser = ptr;
- if(!parser || !parser->user.host)
- return false;
-
- buffer_strcat(wb, rrdhost_hostname(parser->user.host));
- return true;
-}
-
-bool parser_reconstruct_instance(BUFFER *wb, void *ptr) {
- PARSER *parser = ptr;
- if(!parser || !parser->user.st)
- return false;
-
- buffer_strcat(wb, rrdset_name(parser->user.st));
- return true;
-}
-
-bool parser_reconstruct_context(BUFFER *wb, void *ptr) {
- PARSER *parser = ptr;
- if(!parser || !parser->user.st)
- return false;
-
- buffer_strcat(wb, string2str(parser->user.st->context));
- return true;
-}
-
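-// main entry point for external plugins: creates a parser over the plugin's output
-// stream, reads it line by line with a 2-minute timeout and dispatches each keyword,
-// returning the number of data collections performed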
-inline size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations)
-{
- int enabled = cd->unsafe.enabled;
-
- if (!fp_plugin_input || !fp_plugin_output || !enabled) {
- cd->unsafe.enabled = 0;
- return 0;
- }
-
- if (unlikely(fileno(fp_plugin_input) == -1)) {
- netdata_log_error("input file descriptor given is not a valid stream");
- cd->serial_failures++;
- return 0;
- }
-
- if (unlikely(fileno(fp_plugin_output) == -1)) {
- netdata_log_error("output file descriptor given is not a valid stream");
- cd->serial_failures++;
- return 0;
- }
-
- clearerr(fp_plugin_input);
- clearerr(fp_plugin_output);
-
- PARSER *parser;
- {
- PARSER_USER_OBJECT user = {
- .enabled = cd->unsafe.enabled,
- .host = host,
- .cd = cd,
- .trust_durations = trust_durations
- };
-
- // fp_plugin_output = our input; fp_plugin_input = our output
- parser = parser_init(&user, fp_plugin_output, fp_plugin_input, -1, PARSER_INPUT_SPLIT, NULL);
- }
-
- pluginsd_keywords_init(parser, PARSER_INIT_PLUGINSD);
-
- rrd_collector_started();
-
- size_t count = 0;
-
- ND_LOG_STACK lgs[] = {
- ND_LOG_FIELD_CB(NDF_REQUEST, line_splitter_reconstruct_line, &parser->line),
- ND_LOG_FIELD_CB(NDF_NIDL_NODE, parser_reconstruct_node, parser),
- ND_LOG_FIELD_CB(NDF_NIDL_INSTANCE, parser_reconstruct_instance, parser),
- ND_LOG_FIELD_CB(NDF_NIDL_CONTEXT, parser_reconstruct_context, parser),
- ND_LOG_FIELD_END(),
- };
- ND_LOG_STACK_PUSH(lgs);
-
- CLEANUP_FUNCTION_REGISTER(pluginsd_process_thread_cleanup) cleanup_parser = parser;
- buffered_reader_init(&parser->reader);
- CLEAN_BUFFER *buffer = buffer_create(sizeof(parser->reader.read_buffer) + 2, NULL);
- while(likely(service_running(SERVICE_COLLECTORS))) {
-
- if(unlikely(!buffered_reader_next_line(&parser->reader, buffer))) {
- buffered_reader_ret_t ret = buffered_reader_read_timeout(
- &parser->reader,
- fileno((FILE *) parser->fp_input),
- 2 * 60 * MSEC_PER_SEC, true
- );
-
- if(unlikely(ret != BUFFERED_READER_READ_OK))
- break;
-
- continue;
- }
-
- if(unlikely(parser_action(parser, buffer->buffer)))
- break;
-
- buffer->len = 0;
- buffer->buffer[0] = '\0';
- }
-
- cd->unsafe.enabled = parser->user.enabled;
- count = parser->user.data_collections_count;
-
- if(likely(count)) {
- cd->successful_collections += count;
- cd->serial_failures = 0;
- }
- else
- cd->serial_failures++;
-
- return count;
-}
-
-#include "gperf-hashtable.h"
-
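-// dispatches a parsed line to its handler, based on the keyword id resolved through
-// the gperf generated hashtable included above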
-PARSER_RC parser_execute(PARSER *parser, const PARSER_KEYWORD *keyword, char **words, size_t num_words) {
- switch(keyword->id) {
- case PLUGINSD_KEYWORD_ID_SET2:
- return pluginsd_set_v2(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_BEGIN2:
- return pluginsd_begin_v2(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_END2:
- return pluginsd_end_v2(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_SET:
- return pluginsd_set(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_BEGIN:
- return pluginsd_begin(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_END:
- return pluginsd_end(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_RSET:
- return pluginsd_replay_set(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_RBEGIN:
- return pluginsd_replay_begin(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_RDSTATE:
- return pluginsd_replay_rrddim_collection_state(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_RSSTATE:
- return pluginsd_replay_rrdset_collection_state(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_REND:
- return pluginsd_replay_end(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_DIMENSION:
- return pluginsd_dimension(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_CHART:
- return pluginsd_chart(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_CHART_DEFINITION_END:
- return pluginsd_chart_definition_end(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_CLABEL:
- return pluginsd_clabel(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_CLABEL_COMMIT:
- return pluginsd_clabel_commit(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_FUNCTION:
- return pluginsd_function(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_FUNCTION_RESULT_BEGIN:
- return pluginsd_function_result_begin(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_FUNCTION_PROGRESS:
- return pluginsd_function_progress(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_LABEL:
- return pluginsd_label(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_OVERWRITE:
- return pluginsd_overwrite(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_VARIABLE:
- return pluginsd_variable(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_CLAIMED_ID:
- return streaming_claimed_id(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_HOST:
- return pluginsd_host(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_HOST_DEFINE:
- return pluginsd_host_define(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_HOST_DEFINE_END:
- return pluginsd_host_define_end(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_HOST_LABEL:
- return pluginsd_host_labels(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_FLUSH:
- return pluginsd_flush(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_DISABLE:
- return pluginsd_disable(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_EXIT:
- return pluginsd_exit(words, num_words, parser);
- case PLUGINSD_KEYWORD_ID_CONFIG:
- return pluginsd_config(words, num_words, parser);
-
- case PLUGINSD_KEYWORD_ID_DYNCFG_ENABLE:
- case PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_MODULE:
- case PLUGINSD_KEYWORD_ID_DYNCFG_REGISTER_JOB:
- case PLUGINSD_KEYWORD_ID_DYNCFG_RESET:
- case PLUGINSD_KEYWORD_ID_REPORT_JOB_STATUS:
- case PLUGINSD_KEYWORD_ID_DELETE_JOB:
- return pluginsd_dyncfg_noop(words, num_words, parser);
-
- default:
- netdata_log_error("Unknown keyword '%s' with id %zu", keyword->keyword, keyword->id);
- return PARSER_RC_ERROR;
- }
-}
-
-void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire) {
- parser->repertoire = repertoire;
-
- for(size_t i = GPERF_PARSER_MIN_HASH_VALUE ; i <= GPERF_PARSER_MAX_HASH_VALUE ;i++) {
- if(gperf_keywords[i].keyword && *gperf_keywords[i].keyword && (parser->repertoire & gperf_keywords[i].repertoire))
- worker_register_job_name(gperf_keywords[i].worker_job_id, gperf_keywords[i].keyword);
- }
-}
-
-int pluginsd_parser_unittest(void) {
- PARSER *p = parser_init(NULL, NULL, NULL, -1, PARSER_INPUT_SPLIT, NULL);
- pluginsd_keywords_init(p, PARSER_INIT_PLUGINSD | PARSER_INIT_STREAMING);
-
- char *lines[] = {
- "BEGIN2 abcdefghijklmnopqr 123",
- "SET2 abcdefg 0x12345678 0 0",
- "SET2 hijklmnoqr 0x12345678 0 0",
- "SET2 stuvwxyz 0x12345678 0 0",
- "END2",
- NULL,
- };
-
- char *words[PLUGINSD_MAX_WORDS];
- size_t iterations = 1000000;
- size_t count = 0;
- char input[PLUGINSD_LINE_MAX + 1];
-
- usec_t started = now_realtime_usec();
- while(--iterations) {
- for(size_t line = 0; lines[line] ;line++) {
- strncpyz(input, lines[line], PLUGINSD_LINE_MAX);
- size_t num_words = quoted_strings_splitter_pluginsd(input, words, PLUGINSD_MAX_WORDS);
- const char *command = get_word(words, num_words, 0);
- const PARSER_KEYWORD *keyword = parser_find_keyword(p, command);
- if(unlikely(!keyword))
- fatal("Cannot parse the line '%s'", lines[line]);
- count++;
- }
- }
- usec_t ended = now_realtime_usec();
-
- netdata_log_info("Parsed %zu lines in %0.2f secs, %0.2f klines/sec", count,
- (double)(ended - started) / (double)USEC_PER_SEC,
- (double)count / ((double)(ended - started) / (double)USEC_PER_SEC) / 1000.0);
-
- parser_destroy(p);
- return 0;
-}
diff --git a/src/collectors/plugins.d/pluginsd_parser.h b/src/collectors/plugins.d/pluginsd_parser.h
deleted file mode 100644
index 6c126964b..000000000
--- a/src/collectors/plugins.d/pluginsd_parser.h
+++ /dev/null
@@ -1,244 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINSD_PARSER_H
-#define NETDATA_PLUGINSD_PARSER_H
-
-#include "daemon/common.h"
-
-#define WORKER_PARSER_FIRST_JOB 3
-
-// this has to be in-sync with the same at receiver.c
-#define WORKER_RECEIVER_JOB_REPLICATION_COMPLETION (WORKER_PARSER_FIRST_JOB - 3)
-
-// this controls the max response size of a function
-#define PLUGINSD_MAX_DEFERRED_SIZE (100 * 1024 * 1024)
-
-#define PLUGINSD_MIN_RRDSET_POINTERS_CACHE 1024
-
-#define HOST_LABEL_IS_EPHEMERAL "_is_ephemeral"
-// PARSER return codes
-typedef enum __attribute__ ((__packed__)) parser_rc {
- PARSER_RC_OK, // Callback was successful, go on
- PARSER_RC_STOP, // Callback says STOP
- PARSER_RC_ERROR // Callback failed (abort rest of callbacks)
-} PARSER_RC;
-
-typedef enum __attribute__ ((__packed__)) parser_input_type {
- PARSER_INPUT_SPLIT = (1 << 1),
- PARSER_DEFER_UNTIL_KEYWORD = (1 << 2),
-} PARSER_INPUT_TYPE;
-
-typedef enum __attribute__ ((__packed__)) {
- PARSER_INIT_PLUGINSD = (1 << 1),
- PARSER_INIT_STREAMING = (1 << 2),
- PARSER_REP_METADATA = (1 << 3),
-} PARSER_REPERTOIRE;
-
-struct parser;
-typedef PARSER_RC (*keyword_function)(char **words, size_t num_words, struct parser *parser);
-
-typedef struct parser_keyword {
- char *keyword;
- size_t id;
- PARSER_REPERTOIRE repertoire;
- size_t worker_job_id;
-} PARSER_KEYWORD;
-
-typedef struct parser_user_object {
- bool cleanup_slots;
- RRDSET *st;
- RRDHOST *host;
- void *opaque;
- struct plugind *cd;
- int trust_durations;
- RRDLABELS *new_host_labels;
- RRDLABELS *chart_rrdlabels_linked_temporarily;
- size_t data_collections_count;
- int enabled;
-
-#ifdef NETDATA_LOG_STREAM_RECEIVE
- FILE *stream_log_fp;
- PARSER_REPERTOIRE stream_log_repertoire;
-#endif
-
- STREAM_CAPABILITIES capabilities; // receiver capabilities
-
- struct {
- bool parsing_host;
- nd_uuid_t machine_guid;
- char machine_guid_str[UUID_STR_LEN];
- STRING *hostname;
- RRDLABELS *rrdlabels;
- } host_define;
-
- struct parser_user_object_replay {
- time_t start_time;
- time_t end_time;
-
- usec_t start_time_ut;
- usec_t end_time_ut;
-
- time_t wall_clock_time;
-
- bool rset_enabled;
- } replay;
-
- struct parser_user_object_v2 {
- bool locked_data_collection;
- RRDSET_STREAM_BUFFER stream_buffer; // sender capabilities in this
- time_t update_every;
- time_t end_time;
- time_t wall_clock_time;
- bool ml_locked;
- } v2;
-} PARSER_USER_OBJECT;
-
-typedef struct parser {
- uint8_t version; // Parser version
- PARSER_REPERTOIRE repertoire;
- uint32_t flags;
- int fd; // Socket
- FILE *fp_input; // Input source e.g. stream
- FILE *fp_output; // Stream to send commands to plugin
-
-#ifdef ENABLE_HTTPS
- NETDATA_SSL *ssl_output;
-#endif
-#ifdef ENABLE_H2O
- void *h2o_ctx; // if set we use h2o_stream functions to send data
-#endif
-
- PARSER_USER_OBJECT user; // User defined structure to hold extra state between calls
-
- struct buffered_reader reader;
- struct line_splitter line;
- const PARSER_KEYWORD *keyword;
-
- struct {
- const char *end_keyword;
- BUFFER *response;
- void (*action)(struct parser *parser, void *action_data);
- void *action_data;
- } defer;
-
- struct {
- DICTIONARY *functions;
- usec_t smaller_monotonic_timeout_ut;
- } inflight;
-
- struct {
- SPINLOCK spinlock;
- } writer;
-
-} PARSER;
-
-PARSER *parser_init(struct parser_user_object *user, FILE *fp_input, FILE *fp_output, int fd, PARSER_INPUT_TYPE flags, void *ssl);
-void parser_init_repertoire(PARSER *parser, PARSER_REPERTOIRE repertoire);
-void parser_destroy(PARSER *working_parser);
-void pluginsd_cleanup_v2(PARSER *parser);
-void pluginsd_keywords_init(PARSER *parser, PARSER_REPERTOIRE repertoire);
-PARSER_RC parser_execute(PARSER *parser, const PARSER_KEYWORD *keyword, char **words, size_t num_words);
-
-static inline int find_first_keyword(const char *src, char *dst, int dst_size, bool *isspace_map) {
- const char *s = src, *keyword_start;
-
- while (unlikely(isspace_map[(uint8_t)*s])) s++;
- keyword_start = s;
-
- while (likely(*s && !isspace_map[(uint8_t)*s]) && dst_size > 1) {
- *dst++ = *s++;
- dst_size--;
- }
- *dst = '\0';
- return dst_size == 0 ? 0 : (int) (s - keyword_start);
-}
-
-const PARSER_KEYWORD *gperf_lookup_keyword(register const char *str, register size_t len);
-
-static inline const PARSER_KEYWORD *parser_find_keyword(PARSER *parser, const char *command) {
- const PARSER_KEYWORD *t = gperf_lookup_keyword(command, strlen(command));
- if(t && (t->repertoire & parser->repertoire))
- return t;
-
- return NULL;
-}
-
-bool parser_reconstruct_node(BUFFER *wb, void *ptr);
-bool parser_reconstruct_instance(BUFFER *wb, void *ptr);
-bool parser_reconstruct_context(BUFFER *wb, void *ptr);
-
-static inline int parser_action(PARSER *parser, char *input) {
-#ifdef NETDATA_LOG_STREAM_RECEIVE
- static __thread char line[PLUGINSD_LINE_MAX + 1];
- strncpyz(line, input, sizeof(line) - 1);
-#endif
-
- parser->line.count++;
-
- if(unlikely(parser->flags & PARSER_DEFER_UNTIL_KEYWORD)) {
- char command[100 + 1];
- bool has_keyword = find_first_keyword(input, command, 100, isspace_map_pluginsd);
-
- if(!has_keyword || strcmp(command, parser->defer.end_keyword) != 0) {
- if(parser->defer.response) {
- buffer_strcat(parser->defer.response, input);
- if(buffer_strlen(parser->defer.response) > PLUGINSD_MAX_DEFERRED_SIZE) {
- // more than PLUGINSD_MAX_DEFERRED_SIZE of data,
- // or a bad plugin that did not send the end_keyword
- nd_log(NDLS_DAEMON, NDLP_ERR, "PLUGINSD: deferred response is too big (%zu bytes). Stopping this plugin.", buffer_strlen(parser->defer.response));
- return 1;
- }
- }
- return 0;
- }
- else {
- // call the action
- parser->defer.action(parser, parser->defer.action_data);
-
- // empty everything
- parser->defer.action = NULL;
- parser->defer.action_data = NULL;
- parser->defer.end_keyword = NULL;
- parser->defer.response = NULL;
- parser->flags &= ~PARSER_DEFER_UNTIL_KEYWORD;
- }
- return 0;
- }
-
- parser->line.num_words = quoted_strings_splitter_pluginsd(input, parser->line.words, PLUGINSD_MAX_WORDS);
- const char *command = get_word(parser->line.words, parser->line.num_words, 0);
-
- if(unlikely(!command)) {
- line_splitter_reset(&parser->line);
- return 0;
- }
-
- PARSER_RC rc;
- parser->keyword = parser_find_keyword(parser, command);
- if(likely(parser->keyword)) {
- worker_is_busy(parser->keyword->worker_job_id);
-
-#ifdef NETDATA_LOG_STREAM_RECEIVE
- if(parser->user.stream_log_fp && parser->keyword->repertoire & parser->user.stream_log_repertoire)
- fprintf(parser->user.stream_log_fp, "%s", line);
-#endif
-
- rc = parser_execute(parser, parser->keyword, parser->line.words, parser->line.num_words);
- // rc = (*t->func)(words, num_words, parser);
- worker_is_idle();
- }
- else
- rc = PARSER_RC_ERROR;
-
- if(rc == PARSER_RC_ERROR) {
- CLEAN_BUFFER *wb = buffer_create(1024, NULL);
- line_splitter_reconstruct_line(wb, &parser->line);
- netdata_log_error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)",
- command, parser->line.count, buffer_tostring(wb));
- }
-
- line_splitter_reset(&parser->line);
- return (rc == PARSER_RC_ERROR || rc == PARSER_RC_STOP);
-}
-
-#endif //NETDATA_PLUGINSD_PARSER_H
diff --git a/src/collectors/plugins.d/pluginsd_replication.c b/src/collectors/plugins.d/pluginsd_replication.c
deleted file mode 100644
index 8d0975210..000000000
--- a/src/collectors/plugins.d/pluginsd_replication.c
+++ /dev/null
@@ -1,371 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "pluginsd_replication.h"
-
-PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *id = get_word(words, num_words, idx++);
- char *start_time_str = get_word(words, num_words, idx++);
- char *end_time_str = get_word(words, num_words, idx++);
- char *child_now_str = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st;
- if (likely(!id || !*id))
- st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_BEGIN, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- else
- st = pluginsd_rrdset_cache_get_from_slot(parser, host, id, slot, PLUGINSD_KEYWORD_REPLAY_BEGIN);
-
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(!pluginsd_set_scope_chart(parser, st, PLUGINSD_KEYWORD_REPLAY_BEGIN))
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(start_time_str && end_time_str) {
- time_t start_time = (time_t) str2ull_encoded(start_time_str);
- time_t end_time = (time_t) str2ull_encoded(end_time_str);
-
- time_t wall_clock_time = 0, tolerance;
- bool wall_clock_comes_from_child; (void)wall_clock_comes_from_child;
- if(child_now_str) {
- wall_clock_time = (time_t) str2ull_encoded(child_now_str);
- tolerance = st->update_every + 1;
- wall_clock_comes_from_child = true;
- }
-
- if(wall_clock_time <= 0) {
- wall_clock_time = now_realtime_sec();
- tolerance = st->update_every + 5;
- wall_clock_comes_from_child = false;
- }
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- internal_error(
- (!st->replay.start_streaming && (end_time < st->replay.after || start_time > st->replay.before)),
- "REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN " from %ld to %ld, which does not match our request (%ld to %ld).",
- rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time, st->replay.after, st->replay.before);
-
- internal_error(
- true,
- "REPLAY: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN " from %ld to %ld, child wall clock is %ld (%s), had requested %ld to %ld",
- rrdhost_hostname(st->rrdhost), rrdset_id(st),
- start_time, end_time, wall_clock_time, wall_clock_comes_from_child ? "from child" : "parent time",
- st->replay.after, st->replay.before);
-#endif
-
- if(start_time && end_time && start_time < wall_clock_time + tolerance && end_time < wall_clock_time + tolerance && start_time < end_time) {
- if (unlikely(end_time - start_time != st->update_every))
- rrdset_set_update_every_s(st, end_time - start_time);
-
- st->last_collected_time.tv_sec = end_time;
- st->last_collected_time.tv_usec = 0;
-
- st->last_updated.tv_sec = end_time;
- st->last_updated.tv_usec = 0;
-
- st->counter++;
- st->counter_done++;
-
- // these are only needed for db mode RAM, ALLOC
- st->db.current_entry++;
- if(st->db.current_entry >= st->db.entries)
- st->db.current_entry -= st->db.entries;
-
- parser->user.replay.start_time = start_time;
- parser->user.replay.end_time = end_time;
- parser->user.replay.start_time_ut = (usec_t) start_time * USEC_PER_SEC;
- parser->user.replay.end_time_ut = (usec_t) end_time * USEC_PER_SEC;
- parser->user.replay.wall_clock_time = wall_clock_time;
- parser->user.replay.rset_enabled = true;
-
- return PARSER_RC_OK;
- }
-
- netdata_log_error("PLUGINSD REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_BEGIN
- " from %ld to %ld, but timestamps are invalid "
- "(now is %ld [%s], tolerance %ld). Ignoring " PLUGINSD_KEYWORD_REPLAY_SET,
- rrdhost_hostname(st->rrdhost), rrdset_id(st), start_time, end_time,
- wall_clock_time, wall_clock_comes_from_child ? "child wall clock" : "parent wall clock",
- tolerance);
- }
-
- // the child sends an RBEGIN without any parameters initially
- // setting rset_enabled to false, means the RSET should not store any metrics
- // to store metrics, the RBEGIN needs to have timestamps
- parser->user.replay.start_time = 0;
- parser->user.replay.end_time = 0;
- parser->user.replay.start_time_ut = 0;
- parser->user.replay.end_time_ut = 0;
- parser->user.replay.wall_clock_time = 0;
- parser->user.replay.rset_enabled = false;
- return PARSER_RC_OK;
-}
-
-PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARSER *parser) {
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *dimension = get_word(words, num_words, idx++);
- char *value_str = get_word(words, num_words, idx++);
- char *flags_str = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_SET);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(!parser->user.replay.rset_enabled) {
- nd_log_limit_static_thread_var(erl, 1, 0);
- nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_ERR,
- "PLUGINSD: 'host:%s/chart:%s' got a %s but it is disabled by %s errors",
- rrdhost_hostname(host), rrdset_id(st), PLUGINSD_KEYWORD_REPLAY_SET, PLUGINSD_KEYWORD_REPLAY_BEGIN);
-
- // we have to return OK here
- return PARSER_RC_OK;
- }
-
- RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_SET);
- if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- st->pluginsd.set = true;
-
- if (unlikely(!parser->user.replay.start_time || !parser->user.replay.end_time)) {
- netdata_log_error("PLUGINSD: 'host:%s/chart:%s/dim:%s' got a %s with invalid timestamps %ld to %ld from a %s. Disabling it.",
- rrdhost_hostname(host),
- rrdset_id(st),
- dimension,
- PLUGINSD_KEYWORD_REPLAY_SET,
- parser->user.replay.start_time,
- parser->user.replay.end_time,
- PLUGINSD_KEYWORD_REPLAY_BEGIN);
- return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
- }
-
- if (unlikely(!value_str || !*value_str))
- value_str = "NAN";
-
- if(unlikely(!flags_str))
- flags_str = "";
-
- if (likely(value_str)) {
- RRDDIM_FLAGS rd_flags = rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE | RRDDIM_FLAG_ARCHIVED);
-
- if(!(rd_flags & RRDDIM_FLAG_ARCHIVED)) {
- NETDATA_DOUBLE value = str2ndd_encoded(value_str, NULL);
- SN_FLAGS flags = pluginsd_parse_storage_number_flags(flags_str);
-
- if (!netdata_double_isnumber(value) || (flags == SN_EMPTY_SLOT)) {
- value = NAN;
- flags = SN_EMPTY_SLOT;
- }
-
- rrddim_store_metric(rd, parser->user.replay.end_time_ut, value, flags);
- rd->collector.last_collected_time.tv_sec = parser->user.replay.end_time;
- rd->collector.last_collected_time.tv_usec = 0;
- rd->collector.counter++;
- }
- else {
- nd_log_limit_static_global_var(erl, 1, 0);
- nd_log_limit(&erl, NDLS_COLLECTORS, NDLP_WARNING,
- "PLUGINSD: 'host:%s/chart:%s/dim:%s' has the ARCHIVED flag set, but it is replicated. "
- "Ignoring data.",
- rrdhost_hostname(st->rrdhost), rrdset_id(st), rrddim_name(rd));
- }
- }
-
- return PARSER_RC_OK;
-}
-
-PARSER_RC pluginsd_replay_rrddim_collection_state(char **words, size_t num_words, PARSER *parser) {
- if(parser->user.replay.rset_enabled == false)
- return PARSER_RC_OK;
-
- int idx = 1;
- ssize_t slot = pluginsd_parse_rrd_slot(words, num_words);
- if(slot >= 0) idx++;
-
- char *dimension = get_word(words, num_words, idx++);
- char *last_collected_ut_str = get_word(words, num_words, idx++);
- char *last_collected_value_str = get_word(words, num_words, idx++);
- char *last_calculated_value_str = get_word(words, num_words, idx++);
- char *last_stored_value_str = get_word(words, num_words, idx++);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- if(st->pluginsd.set) {
- // reset pos to reuse the same RDAs
- st->pluginsd.pos = 0;
- st->pluginsd.set = false;
- }
-
- RRDDIM *rd = pluginsd_acquire_dimension(host, st, dimension, slot, PLUGINSD_KEYWORD_REPLAY_RRDDIM_STATE);
- if(!rd) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- usec_t dim_last_collected_ut = (usec_t)rd->collector.last_collected_time.tv_sec * USEC_PER_SEC + (usec_t)rd->collector.last_collected_time.tv_usec;
- usec_t last_collected_ut = last_collected_ut_str ? str2ull_encoded(last_collected_ut_str) : 0;
- if(last_collected_ut > dim_last_collected_ut) {
- rd->collector.last_collected_time.tv_sec = (time_t)(last_collected_ut / USEC_PER_SEC);
- rd->collector.last_collected_time.tv_usec = (last_collected_ut % USEC_PER_SEC);
- }
-
- rd->collector.last_collected_value = last_collected_value_str ? str2ll_encoded(last_collected_value_str) : 0;
- rd->collector.last_calculated_value = last_calculated_value_str ? str2ndd_encoded(last_calculated_value_str, NULL) : 0;
- rd->collector.last_stored_value = last_stored_value_str ? str2ndd_encoded(last_stored_value_str, NULL) : 0.0;
-
- return PARSER_RC_OK;
-}
-
-PARSER_RC pluginsd_replay_rrdset_collection_state(char **words, size_t num_words, PARSER *parser) {
- if(parser->user.replay.rset_enabled == false)
- return PARSER_RC_OK;
-
- char *last_collected_ut_str = get_word(words, num_words, 1);
- char *last_updated_ut_str = get_word(words, num_words, 2);
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_RRDSET_STATE);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_RRDSET_STATE,
- PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- usec_t chart_last_collected_ut = (usec_t)st->last_collected_time.tv_sec * USEC_PER_SEC + (usec_t)st->last_collected_time.tv_usec;
- usec_t last_collected_ut = last_collected_ut_str ? str2ull_encoded(last_collected_ut_str) : 0;
- if(last_collected_ut > chart_last_collected_ut) {
- st->last_collected_time.tv_sec = (time_t)(last_collected_ut / USEC_PER_SEC);
- st->last_collected_time.tv_usec = (last_collected_ut % USEC_PER_SEC);
- }
-
- usec_t chart_last_updated_ut = (usec_t)st->last_updated.tv_sec * USEC_PER_SEC + (usec_t)st->last_updated.tv_usec;
- usec_t last_updated_ut = last_updated_ut_str ? str2ull_encoded(last_updated_ut_str) : 0;
- if(last_updated_ut > chart_last_updated_ut) {
- st->last_updated.tv_sec = (time_t)(last_updated_ut / USEC_PER_SEC);
- st->last_updated.tv_usec = (last_updated_ut % USEC_PER_SEC);
- }
-
- st->counter++;
- st->counter_done++;
-
- return PARSER_RC_OK;
-}
-
-PARSER_RC pluginsd_replay_end(char **words, size_t num_words, PARSER *parser) {
- if (num_words < 7) { // accepts 7, but the 7th is optional
- netdata_log_error("REPLAY: malformed " PLUGINSD_KEYWORD_REPLAY_END " command");
- return PARSER_RC_ERROR;
- }
-
- const char *update_every_child_txt = get_word(words, num_words, 1);
- const char *first_entry_child_txt = get_word(words, num_words, 2);
- const char *last_entry_child_txt = get_word(words, num_words, 3);
- const char *start_streaming_txt = get_word(words, num_words, 4);
- const char *first_entry_requested_txt = get_word(words, num_words, 5);
- const char *last_entry_requested_txt = get_word(words, num_words, 6);
- const char *child_world_time_txt = get_word(words, num_words, 7); // optional
-
- time_t update_every_child = (time_t) str2ull_encoded(update_every_child_txt);
- time_t first_entry_child = (time_t) str2ull_encoded(first_entry_child_txt);
- time_t last_entry_child = (time_t) str2ull_encoded(last_entry_child_txt);
-
- bool start_streaming = (strcmp(start_streaming_txt, "true") == 0);
- time_t first_entry_requested = (time_t) str2ull_encoded(first_entry_requested_txt);
- time_t last_entry_requested = (time_t) str2ull_encoded(last_entry_requested_txt);
-
- // the optional child world time
- time_t child_world_time = (child_world_time_txt && *child_world_time_txt) ? (time_t) str2ull_encoded(
- child_world_time_txt) : now_realtime_sec();
-
- RRDHOST *host = pluginsd_require_scope_host(parser, PLUGINSD_KEYWORD_REPLAY_END);
- if(!host) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
- RRDSET *st = pluginsd_require_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END, PLUGINSD_KEYWORD_REPLAY_BEGIN);
- if(!st) return PLUGINSD_DISABLE_PLUGIN(parser, NULL, NULL);
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- internal_error(true,
- "PLUGINSD REPLAY: 'host:%s/chart:%s': got a " PLUGINSD_KEYWORD_REPLAY_END " child db from %llu to %llu, start_streaming %s, had requested from %llu to %llu, wall clock %llu",
- rrdhost_hostname(host), rrdset_id(st),
- (unsigned long long)first_entry_child, (unsigned long long)last_entry_child,
- start_streaming?"true":"false",
- (unsigned long long)first_entry_requested, (unsigned long long)last_entry_requested,
- (unsigned long long)child_world_time
- );
-#endif
-
- parser->user.data_collections_count++;
-
- if(parser->user.replay.rset_enabled && st->rrdhost->receiver) {
- time_t now = now_realtime_sec();
- time_t started = st->rrdhost->receiver->replication_first_time_t;
- time_t current = parser->user.replay.end_time;
-
- if(started && current > started) {
- host->rrdpush_receiver_replication_percent = (NETDATA_DOUBLE) (current - started) * 100.0 / (NETDATA_DOUBLE) (now - started);
- worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION,
- host->rrdpush_receiver_replication_percent);
- }
- }
-
- parser->user.replay.start_time = 0;
- parser->user.replay.end_time = 0;
- parser->user.replay.start_time_ut = 0;
- parser->user.replay.end_time_ut = 0;
- parser->user.replay.wall_clock_time = 0;
- parser->user.replay.rset_enabled = false;
-
- st->counter++;
- st->counter_done++;
- store_metric_collection_completed();
-
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- st->replay.start_streaming = false;
- st->replay.after = 0;
- st->replay.before = 0;
- if(start_streaming)
- st->replay.log_next_data_collection = true;
-#endif
-
- if (start_streaming) {
- if (st->update_every != update_every_child)
- rrdset_set_update_every_s(st, update_every_child);
-
- if(rrdset_flag_check(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS)) {
- rrdset_flag_set(st, RRDSET_FLAG_RECEIVER_REPLICATION_FINISHED);
- rrdset_flag_clear(st, RRDSET_FLAG_RECEIVER_REPLICATION_IN_PROGRESS);
- rrdset_flag_clear(st, RRDSET_FLAG_SYNC_CLOCK);
- rrdhost_receiver_replicating_charts_minus_one(st->rrdhost);
- }
-#ifdef NETDATA_LOG_REPLICATION_REQUESTS
- else
- internal_error(true, "REPLAY ERROR: 'host:%s/chart:%s' got a " PLUGINSD_KEYWORD_REPLAY_END " with enable_streaming = true, but there is no replication in progress for this chart.",
- rrdhost_hostname(host), rrdset_id(st));
-#endif
-
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END);
-
- host->rrdpush_receiver_replication_percent = 100.0;
- worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION, host->rrdpush_receiver_replication_percent);
-
- return PARSER_RC_OK;
- }
-
- pluginsd_clear_scope_chart(parser, PLUGINSD_KEYWORD_REPLAY_END);
-
- rrdcontext_updated_retention_rrdset(st);
-
- bool ok = replicate_chart_request(send_to_plugin, parser, host, st,
- first_entry_child, last_entry_child, child_world_time,
- first_entry_requested, last_entry_requested);
- return ok ? PARSER_RC_OK : PARSER_RC_ERROR;
-}
diff --git a/src/collectors/plugins.d/pluginsd_replication.h b/src/collectors/plugins.d/pluginsd_replication.h
deleted file mode 100644
index 1c6f617e6..000000000
--- a/src/collectors/plugins.d/pluginsd_replication.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PLUGINSD_REPLICATION_H
-#define NETDATA_PLUGINSD_REPLICATION_H
-
-#include "pluginsd_internals.h"
-
-PARSER_RC pluginsd_replay_begin(char **words, size_t num_words, PARSER *parser);
-PARSER_RC pluginsd_replay_set(char **words, size_t num_words, PARSER *parser);
-PARSER_RC pluginsd_replay_rrddim_collection_state(char **words, size_t num_words, PARSER *parser);
-PARSER_RC pluginsd_replay_rrdset_collection_state(char **words, size_t num_words, PARSER *parser);
-PARSER_RC pluginsd_replay_end(char **words, size_t num_words, PARSER *parser);
-
-#endif //NETDATA_PLUGINSD_REPLICATION_H
diff --git a/src/collectors/proc.plugin/README.md b/src/collectors/proc.plugin/README.md
index 79bfd8645..8523309c7 100644
--- a/src/collectors/proc.plugin/README.md
+++ b/src/collectors/proc.plugin/README.md
@@ -6,35 +6,35 @@ This plugin is not an external plugin, but one of Netdata's threads.
In detail, it collects metrics from:
-- `/proc/net/dev` (all network interfaces for all their values)
-- `/proc/diskstats` (all disks for all their values)
-- `/proc/mdstat` (status of RAID arrays)
-- `/proc/net/snmp` (total IPv4, TCP and UDP usage)
-- `/proc/net/snmp6` (total IPv6 usage)
-- `/proc/net/netstat` (more IPv4 usage)
-- `/proc/net/wireless` (wireless extension)
-- `/proc/net/stat/nf_conntrack` (connection tracking performance)
-- `/proc/net/stat/synproxy` (synproxy performance)
-- `/proc/net/ip_vs/stats` (IPVS connection statistics)
-- `/proc/stat` (CPU utilization and attributes)
-- `/proc/meminfo` (memory information)
-- `/proc/vmstat` (system performance)
-- `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers)
-- `/sys/fs/cgroup` (Control Groups - Linux Containers)
-- `/proc/self/mountinfo` (mount points)
-- `/proc/interrupts` (total and per core hardware interrupts)
-- `/proc/softirqs` (total and per core software interrupts)
-- `/proc/loadavg` (system load and total processes running)
-- `/proc/pressure/{cpu,memory,io}` (pressure stall information)
-- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
-- `/proc/spl/kstat/zfs/arcstats` (status of ZFS adaptive replacement cache)
-- `/proc/spl/kstat/zfs/pool/state` (state of ZFS pools)
-- `/sys/class/power_supply` (power supply properties)
-- `/sys/class/infiniband` (infiniband interconnect)
-- `/sys/class/drm` (AMD GPUs)
-- `ipc` (IPC semaphores and message queues)
-- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
-- `netdata` (internal Netdata resources utilization)
+- `/proc/net/dev` (all network interfaces for all their values)
+- `/proc/diskstats` (all disks for all their values)
+- `/proc/mdstat` (status of RAID arrays)
+- `/proc/net/snmp` (total IPv4, TCP and UDP usage)
+- `/proc/net/snmp6` (total IPv6 usage)
+- `/proc/net/netstat` (more IPv4 usage)
+- `/proc/net/wireless` (wireless extension)
+- `/proc/net/stat/nf_conntrack` (connection tracking performance)
+- `/proc/net/stat/synproxy` (synproxy performance)
+- `/proc/net/ip_vs/stats` (IPVS connection statistics)
+- `/proc/stat` (CPU utilization and attributes)
+- `/proc/meminfo` (memory information)
+- `/proc/vmstat` (system performance)
+- `/proc/net/rpc/nfsd` (NFS server statistics for both v3 and v4 NFS servers)
+- `/sys/fs/cgroup` (Control Groups - Linux Containers)
+- `/proc/self/mountinfo` (mount points)
+- `/proc/interrupts` (total and per core hardware interrupts)
+- `/proc/softirqs` (total and per core software interrupts)
+- `/proc/loadavg` (system load and total processes running)
+- `/proc/pressure/{cpu,memory,io}` (pressure stall information)
+- `/proc/sys/kernel/random/entropy_avail` (random numbers pool availability - used in cryptography)
+- `/proc/spl/kstat/zfs/arcstats` (status of ZFS adaptive replacement cache)
+- `/proc/spl/kstat/zfs/pool/state` (state of ZFS pools)
+- `/sys/class/power_supply` (power supply properties)
+- `/sys/class/infiniband` (infiniband interconnect)
+- `/sys/class/drm` (AMD GPUs)
+- `ipc` (IPC semaphores and message queues)
+- `ksm` Kernel Same-Page Merging performance (several files under `/sys/kernel/mm/ksm`).
+- `netdata` (internal Netdata resources utilization)
- - -
@@ -48,47 +48,47 @@ Hopefully, the Linux kernel provides many metrics that can provide deep insights
### Monitored disk metrics
-- **I/O bandwidth/s (kb/s)**
+- **I/O bandwidth/s (kb/s)**
The amount of data transferred from and to the disk.
-- **Amount of discarded data (kb/s)**
-- **I/O operations/s**
+- **Amount of discarded data (kb/s)**
+- **I/O operations/s**
The number of I/O operations completed.
-- **Extended I/O operations/s**
+- **Extended I/O operations/s**
The number of extended I/O operations completed.
-- **Queued I/O operations**
+- **Queued I/O operations**
The number of currently queued I/O operations. For traditional disks that execute commands one after another, one of them is being run by the disk and the rest are just waiting in a queue.
-- **Backlog size (time in ms)**
+- **Backlog size (time in ms)**
The expected duration of the currently queued I/O operations.
-- **Utilization (time percentage)**
+- **Utilization (time percentage)**
 The percentage of time the disk was busy with something. This is a very interesting metric, since for most disks, which execute commands sequentially, **this is the key indication of congestion**. A sequential disk that is busy 100% of the available time has no time to do anything more, so even if the bandwidth or the number of operations executed by the disk is low, its capacity has been reached.
 Of course, for newer disk technologies (like fusion cards) that are capable of executing multiple commands in parallel, this metric is meaningless.
-- **Average I/O operation time (ms)**
+- **Average I/O operation time (ms)**
The average time for I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.
-- **Average I/O operation time for extended operations (ms)**
+- **Average I/O operation time for extended operations (ms)**
The average time for extended I/O requests issued to the device to be served. This includes the time spent by the requests in queue and the time spent servicing them.
-- **Average I/O operation size (kb)**
+- **Average I/O operation size (kb)**
The average amount of data of the completed I/O operations.
-- **Average amount of discarded data (kb)**
+- **Average amount of discarded data (kb)**
The average amount of data of the completed discard operations.
-- **Average Service Time (ms)**
+- **Average Service Time (ms)**
 The average service time for completed I/O operations. This metric is calculated by dividing the total busy time of the disk by the number of completed operations (see the short sketch after this list). If the disk is able to execute multiple operations in parallel, the reported average service time will be misleading.
-- **Average Service Time for extended I/O operations (ms)**
+- **Average Service Time for extended I/O operations (ms)**
The average service time for completed extended I/O operations.
-- **Merged I/O operations/s**
+- **Merged I/O operations/s**
 The Linux kernel is capable of merging I/O operations. So, if two requests to read data from the disk are adjacent, the Linux kernel may merge them into one before handing them to the disk. This metric measures the number of operations that have been merged by the Linux kernel.
-- **Merged discard operations/s**
-- **Total I/O time**
+- **Merged discard operations/s**
+- **Total I/O time**
The sum of the duration of all completed I/O operations. This number can exceed the interval if the disk is able to execute multiple I/O operations in parallel.
-- **Space usage**
+- **Space usage**
For mounted disks, Netdata will provide a chart for their space, with 3 dimensions:
- 1. free
- 2. used
- 3. reserved for root
-- **inode usage**
+ 1. free
+ 2. used
+ 3. reserved for root
+- **inode usage**
 For mounted disks, Netdata will provide a chart for their inodes (number of files and directories), with 3 dimensions:
- 1. free
- 2. used
- 3. reserved for root
+ 1. free
+ 2. used
+ 3. reserved for root
### disk names
@@ -100,9 +100,9 @@ By default, Netdata will enable monitoring metrics only when they are not zero.
 Netdata categorizes all block devices into 3 categories:
-1. physical disks (i.e. block devices that do not have child devices and are not partitions)
-2. virtual disks (i.e. block devices that have child devices - like RAID devices)
-3. disk partitions (i.e. block devices that are part of a physical disk)
+1. physical disks (i.e. block devices that do not have child devices and are not partitions)
+2. virtual disks (i.e. block devices that have child devices - like RAID devices)
+3. disk partitions (i.e. block devices that are part of a physical disk)
 Performance metrics are enabled by default for all disk devices, except partitions and unmounted virtual disks. Of course, you can enable/disable monitoring for any block device by editing the Netdata configuration file.
@@ -118,7 +118,7 @@ mv netdata.conf.new netdata.conf
Then edit `netdata.conf` and find the following section. This is the basic plugin configuration.
-```
+```text
[plugin:proc:/proc/diskstats]
# enable new disks detected at runtime = yes
# performance metrics for physical disks = auto
@@ -133,7 +133,7 @@ Then edit `netdata.conf` and find the following section. This is the basic plugi
# extended operations for all disks = auto
# backlog for all disks = auto
# bcache for all disks = auto
- # bcache priority stats update every = 0
+ # bcache priority stats update every = off
# remove charts of removed disks = yes
# path to get block device = /sys/block/%s
# path to get block device bcache = /sys/block/%s/bcache
@@ -152,25 +152,25 @@ Then edit `netdata.conf` and find the following section. This is the basic plugi
For each virtual disk, physical disk and partition you will have a section like this:
-```
+```text
[plugin:proc:/proc/diskstats:sda]
- # enable = yes
- # enable performance metrics = auto
- # bandwidth = auto
- # operations = auto
- # merged operations = auto
- # i/o time = auto
- # queued operations = auto
- # utilization percentage = auto
+ # enable = yes
+ # enable performance metrics = auto
+ # bandwidth = auto
+ # operations = auto
+ # merged operations = auto
+ # i/o time = auto
+ # queued operations = auto
+ # utilization percentage = auto
# extended operations = auto
- # backlog = auto
+ # backlog = auto
```
For all configuration options:
-- `auto` = enable monitoring if the collected values are not zero
-- `yes` = enable monitoring
-- `no` = disable monitoring
+- `auto` = enable monitoring if the collected values are not zero
+- `yes` = enable monitoring
+- `no` = disable monitoring
Of course, to set options, you will have to uncomment them. The comments show the internal defaults.
@@ -180,14 +180,14 @@ After saving `/etc/netdata/netdata.conf`, restart your Netdata to apply them.
 You can quite easily disable performance metrics for an individual device, for example:
-```
+```text
[plugin:proc:/proc/diskstats:sda]
- enable performance metrics = no
+ enable performance metrics = no
```
 But sometimes you need to disable performance metrics for all devices of the same type. To do that, you need to figure out the device type from `/proc/diskstats`, for example:
-```
+```text
7 0 loop0 1651 0 3452 168 0 0 0 0 0 8 168
7 1 loop1 4955 0 11924 880 0 0 0 0 0 64 880
7 2 loop2 36 0 216 4 0 0 0 0 0 4 4
@@ -200,7 +200,7 @@ But sometimes you need disable performance metrics for all devices with the same
 All zram devices start with major number `251` and all loop devices start with major number `7`.
 So, to disable performance metrics for all loop devices, you could add `performance metrics for disks with major 7 = no` to the `[plugin:proc:/proc/diskstats]` section.
-```
+```text
[plugin:proc:/proc/diskstats]
performance metrics for disks with major 7 = no
```
@@ -209,34 +209,34 @@ So, to disable performance metrics for all loop devices you could add `performan
### Monitored RAID array metrics
-1. **Health** Number of failed disks in every array (aggregate chart).
+1. **Health** Number of failed disks in every array (aggregate chart).
-2. **Disks stats**
+2. **Disks stats**
-- total (number of devices array ideally would have)
-- inuse (number of devices currently are in use)
+ - total (number of devices array ideally would have)
+ - inuse (number of devices currently are in use)
-3. **Mismatch count**
+3. **Mismatch count**
-- unsynchronized blocks
+ - unsynchronized blocks
-4. **Current status**
+4. **Current status**
-- resync in percent
-- recovery in percent
-- reshape in percent
-- check in percent
+ - resync in percent
+ - recovery in percent
+ - reshape in percent
+ - check in percent
-5. **Operation status** (if resync/recovery/reshape/check is active)
+5. **Operation status** (if resync/recovery/reshape/check is active)
-- finish in minutes
-- speed in megabytes/s
+ - finish in minutes
+ - speed in megabytes/s
-6. **Nonredundant array availability**
+6. **Non-redundant array availability**
#### configuration
-```
+```text
[plugin:proc:/proc/mdstat]
# faulty devices = yes
# nonredundant arrays availability = yes
@@ -267,7 +267,7 @@ If your system has more than 50 processors and you would like to see the CPU the
state charts that are automatically disabled, you can set the following boolean options in the
`[plugin:proc:/proc/stat]` section.
-```conf
+```text
keep per core files open = yes
keep cpuidle files open = yes
core_throttle_count = yes
@@ -311,50 +311,50 @@ each state.
### Monitored memory metrics
-- Amount of memory swapped in/out
-- Amount of memory paged from/to disk
-- Number of memory page faults
-- Number of out of memory kills
-- Number of NUMA events
+- Amount of memory swapped in/out
+- Amount of memory paged from/to disk
+- Number of memory page faults
+- Number of out of memory kills
+- Number of NUMA events
### Configuration
-```conf
+```text
[plugin:proc:/proc/vmstat]
- filename to monitor = /proc/vmstat
- swap i/o = auto
- disk i/o = yes
- memory page faults = yes
- out of memory kills = yes
- system-wide numa metric summary = auto
+ filename to monitor = /proc/vmstat
+ swap i/o = auto
+ disk i/o = yes
+ memory page faults = yes
+ out of memory kills = yes
+ system-wide numa metric summary = auto
```
## Monitoring Network Interfaces
### Monitored network interface metrics
-- **Physical Network Interfaces Aggregated Bandwidth (kilobits/s)**
+- **Physical Network Interfaces Aggregated Bandwidth (kilobits/s)**
The amount of data received and sent through all physical interfaces in the system. This is the source of data for the Net Inbound and Net Outbound dials in the System Overview section.
-- **Bandwidth (kilobits/s)**
+- **Bandwidth (kilobits/s)**
The amount of data received and sent through the interface.
-- **Packets (packets/s)**
+- **Packets (packets/s)**
The number of packets received, packets sent, and multicast packets transmitted through the interface.
-- **Interface Errors (errors/s)**
+- **Interface Errors (errors/s)**
The number of errors for the inbound and outbound traffic on the interface.
-- **Interface Drops (drops/s)**
+- **Interface Drops (drops/s)**
The number of packets dropped for the inbound and outbound traffic on the interface.
-- **Interface FIFO Buffer Errors (errors/s)**
+- **Interface FIFO Buffer Errors (errors/s)**
The number of FIFO buffer errors encountered while receiving and transmitting data through the interface.
-- **Compressed Packets (packets/s)**
+- **Compressed Packets (packets/s)**
The number of compressed packets transmitted or received by the device driver.
-- **Network Interface Events (events/s)**
+- **Network Interface Events (events/s)**
The number of packet framing errors, collisions detected on the interface, and carrier losses detected by the device driver.
 By default, Netdata enables monitoring metrics only when they are not zero. If they are constantly zero, they are ignored. Metrics that start having values after Netdata is started will be detected, and charts will be automatically added to the dashboard (although a refresh of the dashboard is needed for them to appear).
@@ -363,7 +363,7 @@ By default Netdata will enable monitoring metrics only when they are not zero. I
 The settings for monitoring wireless interfaces are in the `[plugin:proc:/proc/net/wireless]` section of your `netdata.conf` file.
-```conf
+```text
status for all interfaces = yes
quality for all interfaces = yes
discarded packets for all interfaces = yes
@@ -372,62 +372,62 @@ The settings for monitoring wireless is in the `[plugin:proc:/proc/net/wireless]
You can set the following values for each configuration option:
-- `auto` = enable monitoring if the collected values are not zero
-- `yes` = enable monitoring
-- `no` = disable monitoring
+- `auto` = enable monitoring if the collected values are not zero
+- `yes` = enable monitoring
+- `no` = disable monitoring
#### Monitored wireless interface metrics
-- **Status**
+- **Status**
The current state of the interface. This is a device-dependent option.
-- **Link**
- Overall quality of the link.
+- **Link**
+ Overall quality of the link.
-- **Level**
+- **Level**
Received signal strength (RSSI), which indicates how strong the received signal is.
-
-- **Noise**
- Background noise level.
-
-- **Discarded packets**
- Discarded packets for: Number of packets received with a different NWID or ESSID (`nwid`), unable to decrypt (`crypt`), hardware was not able to properly re-assemble the link layer fragments (`frag`), packets failed to deliver (`retry`), and packets lost in relation with specific wireless operations (`misc`).
-
-- **Missed beacon**
+
+- **Noise**
+ Background noise level.
+
+- **Discarded packets**
+ Discarded packets, by reason: packets received with a different NWID or ESSID (`nwid`), packets the hardware was unable to decrypt (`crypt`), packets whose link-layer fragments the hardware could not properly re-assemble (`frag`), packets that failed to be delivered (`retry`), and packets lost in relation to specific wireless operations (`misc`).
+
+- **Missed beacon**
Number of periodic beacons from the cell or the access point the interface has missed.
-
-#### Wireless configuration
+
+#### Wireless configuration
#### alerts
There are several alerts defined in `health.d/net.conf`.
-The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alerts can be annoying for some network configurations. It is especially true for some bonding configurations if an interface is a child or a bonding interface itself. If it is expected to have a certain number of drops on an interface for a certain network configuration, a separate alert with different triggering thresholds can be created or the existing one can be disabled for this specific interface. It can be done with the help of the [families](/src/health/REFERENCE.md#alert-line-families) line in the alert configuration. For example, if you want to disable the `inbound packets dropped` alert for `eth0`, set `families: !eth0 *` in the alert definition for `template: inbound_packets_dropped`.
+The tricky ones are `inbound packets dropped` and `inbound packets dropped ratio`. They have quite a strict policy so that they warn users about possible issues. These alerts can be annoying for some network configurations, especially for some bonding configurations where an interface is a child or a bonding interface itself. If a certain number of drops is expected on an interface for a given network configuration, a separate alert with different triggering thresholds can be created, or the existing one can be disabled for this specific interface. This can be done with the `families` line in the alert configuration. For example, if you want to disable the `inbound packets dropped` alert for `eth0`, set `families: !eth0 *` in the alert definition for `template: inbound_packets_dropped`.
#### configuration
Module configuration:
-```
+```text
[plugin:proc:/proc/net/dev]
- # filename to monitor = /proc/net/dev
- # path to get virtual interfaces = /sys/devices/virtual/net/%s
- # path to get net device speed = /sys/class/net/%s/speed
- # enable new interfaces detected at runtime = auto
- # bandwidth for all interfaces = auto
- # packets for all interfaces = auto
- # errors for all interfaces = auto
- # drops for all interfaces = auto
- # fifo for all interfaces = auto
- # compressed packets for all interfaces = auto
- # frames, collisions, carrier counters for all interfaces = auto
- # disable by default interfaces matching = lo fireqos* *-ifb
- # refresh interface speed every seconds = 10
+ # filename to monitor = /proc/net/dev
+ # path to get virtual interfaces = /sys/devices/virtual/net/%s
+ # path to get net device speed = /sys/class/net/%s/speed
+ # enable new interfaces detected at runtime = auto
+ # bandwidth for all interfaces = auto
+ # packets for all interfaces = auto
+ # errors for all interfaces = auto
+ # drops for all interfaces = auto
+ # fifo for all interfaces = auto
+ # compressed packets for all interfaces = auto
+ # frames, collisions, carrier counters for all interfaces = auto
+ # disable by default interfaces matching = lo fireqos* *-ifb
+ # refresh interface speed every seconds = 10
```
Per interface configuration:
-```
+```text
[plugin:proc:/proc/net/dev:enp0s3]
# enabled = yes
# virtual = no
@@ -444,8 +444,6 @@ Per interface configuration:
![image6](https://cloud.githubusercontent.com/assets/2662304/14253733/53550b16-fa95-11e5-8d9d-4ed171df4735.gif)
----
-
 SYNPROXY is a TCP SYN packet proxy. It can be used to protect any TCP server (like a web server) from SYN floods and similar DDoS attacks.
 SYNPROXY is a netfilter module in the Linux kernel (since version 3.12). It is optimized to handle millions of packets per second, utilizing all available CPUs, without any concurrency locking between connections.
@@ -454,8 +452,8 @@ The net effect of this, is that the real servers will not notice any change duri
 Netdata does not enable SYNPROXY. It just uses the SYNPROXY metrics exposed by your kernel, so you will first need to configure it. The hard way is to run iptables SYNPROXY commands directly on the console. An easier way is to use [FireHOL](https://firehol.org/), which is a firewall manager for iptables. FireHOL can configure SYNPROXY using the following setup guides:
-- **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)**
-- **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)**
+- **[Working with SYNPROXY](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY)**
+- **[Working with SYNPROXY and traps](https://github.com/firehol/firehol/wiki/Working-with-SYNPROXY-and-traps)**
### Real-time monitoring of Linux Anti-DDoS
@@ -463,10 +461,10 @@ Netdata is able to monitor in real-time (per second updates) the operation of th
It visualizes 4 charts:
-1. TCP SYN Packets received on ports operated by SYNPROXY
-2. TCP Cookies (valid, invalid, retransmits)
-3. Connections Reopened
-4. Entries used
+1. TCP SYN Packets received on ports operated by SYNPROXY
+2. TCP Cookies (valid, invalid, retransmits)
+3. Connections Reopened
+4. Entries used
Example image:
@@ -483,37 +481,37 @@ battery capacity.
Depending on the underlying driver, it may provide the following charts
and metrics:
-1. Capacity: The power supply capacity expressed as a percentage.
+1. Capacity: The power supply capacity expressed as a percentage.
- - capacity_now
+ - capacity_now
-2. Charge: The charge for the power supply, expressed as amphours.
+2. Charge: The charge for the power supply, expressed as amp-hours.
- - charge_full_design
- - charge_full
- - charge_now
- - charge_empty
- - charge_empty_design
+ - charge_full_design
+ - charge_full
+ - charge_now
+ - charge_empty
+ - charge_empty_design
-3. Energy: The energy for the power supply, expressed as watthours.
+3. Energy: The energy for the power supply, expressed as watt-hours.
- - energy_full_design
- - energy_full
- - energy_now
- - energy_empty
- - energy_empty_design
+ - energy_full_design
+ - energy_full
+ - energy_now
+ - energy_empty
+ - energy_empty_design
-4. Voltage: The voltage for the power supply, expressed as volts.
+4. Voltage: The voltage for the power supply, expressed as volts.
- - voltage_max_design
- - voltage_max
- - voltage_now
- - voltage_min
- - voltage_min_design
+ - voltage_max_design
+ - voltage_max
+ - voltage_now
+ - voltage_min
+ - voltage_min_design
-#### configuration
+### configuration
-```
+```text
[plugin:proc:/sys/class/power_supply]
# battery capacity = yes
# battery charge = no
@@ -524,18 +522,18 @@ and metrics:
# directory to monitor = /sys/class/power_supply
```
-#### notes
+### notes
-- Most drivers provide at least the first chart. Battery powered ACPI
+- Most drivers provide at least the first chart. Battery powered ACPI
compliant systems (like most laptops) provide all but the third, but do
not provide all of the metrics for each chart.
-- Current, energy, and voltages are reported with a *very* high precision
+- Current, energy, and voltages are reported with a *very* high precision
by the power_supply framework. Usually, this is far higher than the
actual hardware supports reporting, so expect to see changes in these
charts jump instead of scaling smoothly.
-- If `max` or `full` attribute is defined by the driver, but not a
+- If `max` or `full` attribute is defined by the driver, but not a
corresponding `min` or `empty` attribute, then Netdata will still provide
the corresponding `min` or `empty`, which will then always read as zero.
This way, alerts which match on these will still work.
@@ -548,17 +546,17 @@ This module monitors every active Infiniband port. It provides generic counters
Each port will have its counters metrics monitored, grouped in the following charts:
-- **Bandwidth usage**
+- **Bandwidth usage**
Sent/Received data, in KB/s
-- **Packets Statistics**
+- **Packets Statistics**
Sent/Received packets, in 3 categories: total, unicast and multicast.
-- **Errors Statistics**
+- **Errors Statistics**
Many errors counters are provided, presenting statistics for:
- - Packets: malformed, sent/received discarded by card/switch, missing resource
- - Link: downed, recovered, integrity error, minor error
- - Other events: Tick Wait to send, buffer overrun
+ - Packets: malformed, sent/received discarded by card/switch, missing resource
+ - Link: downed, recovered, integrity error, minor error
+ - Other events: Tick Wait to send, buffer overrun
 If your vendor is supported, you'll also get HW-Counters statistics. Since these are vendor-specific, please refer to your vendor's documentation.
@@ -568,7 +566,7 @@ If your vendor is supported, you'll also get HW-Counters statistics. These being
 The default configuration monitors only enabled InfiniBand ports, and refreshes newly activated or created ports every 30 seconds.
-```
+```text
[plugin:proc:/sys/class/infiniband]
# dirname to monitor = /sys/class/infiniband
# bandwidth counters = yes
@@ -578,7 +576,7 @@ Default configuration will monitor only enabled infiniband ports, and refresh ne
# hardware errors counters = auto
# monitor only ports being active = auto
# disable by default interfaces matching =
- # refresh ports state every seconds = 30
+ # refresh ports state every = 30s
```
## AMD GPUs
@@ -589,45 +587,46 @@ This module monitors every AMD GPU card discovered at agent startup.
The following charts will be provided:
-- **GPU utilization**
-- **GPU memory utilization**
-- **GPU clock frequency**
-- **GPU memory clock frequency**
-- **VRAM memory usage percentage**
-- **VRAM memory usage**
-- **visible VRAM memory usage percentage**
-- **visible VRAM memory usage**
-- **GTT memory usage percentage**
-- **GTT memory usage**
+- **GPU utilization**
+- **GPU memory utilization**
+- **GPU clock frequency**
+- **GPU memory clock frequency**
+- **VRAM memory usage percentage**
+- **VRAM memory usage**
+- **visible VRAM memory usage percentage**
+- **visible VRAM memory usage**
+- **GTT memory usage percentage**
+- **GTT memory usage**
### configuration
The `drm` path can be configured if it differs from the default:
-```
+```text
[plugin:proc:/sys/class/drm]
# directory to monitor = /sys/class/drm
```
-> [!NOTE]
-> Temperature, fan speed, voltage and power metrics for AMD GPUs can be monitored using the [Sensors](/src/collectors/charts.d.plugin/sensors/README.md) plugin.
+> **Note**
+>
+> Temperature, fan speed, voltage and power metrics for AMD GPUs can be monitored using the [Sensors](/src/go/plugin/go.d/modules/sensors/README.md) plugin.
## IPC
### Monitored IPC metrics
-- **number of messages in message queues**
-- **amount of memory used by message queues**
-- **number of semaphores**
-- **number of semaphore arrays**
-- **number of shared memory segments**
-- **amount of memory used by shared memory segments**
+- **number of messages in message queues**
+- **amount of memory used by message queues**
+- **number of semaphores**
+- **number of semaphore arrays**
+- **number of shared memory segments**
+- **amount of memory used by shared memory segments**
 Since the message queue charts are dynamic, sane limits are applied to the number of dimensions per chart (the limit is configurable).
### configuration
-```
+```text
[plugin:proc:ipc]
# message queues = yes
# semaphore totals = yes
@@ -636,5 +635,3 @@ As far as the message queue charts are dynamic, sane limits are applied for the
# shm filename to monitor = /proc/sysvipc/shm
# max dimensions in memory allowed = 50
```
-
-
diff --git a/src/collectors/proc.plugin/integrations/system_statistics.md b/src/collectors/proc.plugin/integrations/system_statistics.md
index 0b6e38820..1381bdb1d 100644
--- a/src/collectors/proc.plugin/integrations/system_statistics.md
+++ b/src/collectors/proc.plugin/integrations/system_statistics.md
@@ -150,8 +150,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/proc.plugin/ipc.c b/src/collectors/proc.plugin/ipc.c
index 5b47116b9..c280254ac 100644
--- a/src/collectors/proc.plugin/ipc.c
+++ b/src/collectors/proc.plugin/ipc.c
@@ -182,7 +182,7 @@ static inline int ipc_sem_get_status(struct ipc_status *st) {
return 0;
}
-int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_root) {
+static int ipc_msq_get_info(const char *msg_filename, struct message_queue **message_queue_root) {
static procfile *ff;
struct message_queue *msq;
@@ -238,7 +238,7 @@ int ipc_msq_get_info(char *msg_filename, struct message_queue **message_queue_ro
return 0;
}
-int ipc_shm_get_info(char *shm_filename, struct shm_stats *shm) {
+static int ipc_shm_get_info(const char *shm_filename, struct shm_stats *shm) {
static procfile *ff;
if(unlikely(!ff)) {
@@ -287,10 +287,10 @@ int do_ipc(int update_every, usec_t dt) {
static const RRDVAR_ACQUIRED *arrays_max = NULL, *semaphores_max = NULL;
static RRDSET *st_arrays = NULL;
static RRDDIM *rd_arrays = NULL;
- static char *msg_filename = NULL;
+ static const char *msg_filename = NULL;
static struct message_queue *message_queue_root = NULL;
static long long dimensions_limit;
- static char *shm_filename = NULL;
+ static const char *shm_filename = NULL;
if(unlikely(do_sem == -1)) {
do_msg = config_get_boolean("plugin:proc:ipc", "message queues", CONFIG_BOOLEAN_YES);
diff --git a/src/collectors/proc.plugin/plugin_proc.c b/src/collectors/proc.plugin/plugin_proc.c
index b4a856467..0a1903ca0 100644
--- a/src/collectors/proc.plugin/plugin_proc.c
+++ b/src/collectors/proc.plugin/plugin_proc.c
@@ -226,9 +226,8 @@ void *proc_main(void *ptr)
worker_register_job_name(i, proc_modules[i].dim);
}
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC);
inside_lxc_container = is_lxcfs_proc_mounted();
is_mem_swap_enabled = is_swap_enabled();
@@ -245,7 +244,7 @@ void *proc_main(void *ptr)
while(service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t hb_dt = heartbeat_next(&hb);
if(unlikely(!service_running(SERVICE_COLLECTORS)))
break;
@@ -279,7 +278,7 @@ int get_numa_node_count(void)
char name[FILENAME_MAX + 1];
snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
- char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
+ const char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
DIR *dir = opendir(dirname);
if (dir) {
diff --git a/src/collectors/proc.plugin/proc_diskstats.c b/src/collectors/proc.plugin/proc_diskstats.c
index 015a985cc..2c7320aa1 100644
--- a/src/collectors/proc.plugin/proc_diskstats.c
+++ b/src/collectors/proc.plugin/proc_diskstats.c
@@ -81,30 +81,25 @@ static struct disk {
usec_t bcache_priority_stats_elapsed_usec;
ND_DISK_IO disk_io;
+ ND_DISK_OPS disk_ops;
+ ND_DISK_QOPS disk_qops;
+ ND_DISK_UTIL disk_util;
+ ND_DISK_BUSY disk_busy;
+ ND_DISK_IOTIME disk_iotime;
+ ND_DISK_AWAIT disk_await;
+ ND_DISK_SVCTM disk_svctm;
+ ND_DISK_AVGSZ disk_avgsz;
RRDSET *st_ext_io;
RRDDIM *rd_io_discards;
- RRDSET *st_ops;
- RRDDIM *rd_ops_reads;
- RRDDIM *rd_ops_writes;
-
RRDSET *st_ext_ops;
RRDDIM *rd_ops_discards;
RRDDIM *rd_ops_flushes;
- RRDSET *st_qops;
- RRDDIM *rd_qops_operations;
-
RRDSET *st_backlog;
RRDDIM *rd_backlog_backlog;
- RRDSET *st_busy;
- RRDDIM *rd_busy_busy;
-
- RRDSET *st_util;
- RRDDIM *rd_util_utilization;
-
RRDSET *st_mops;
RRDDIM *rd_mops_reads;
RRDDIM *rd_mops_writes;
@@ -112,32 +107,17 @@ static struct disk {
RRDSET *st_ext_mops;
RRDDIM *rd_mops_discards;
- RRDSET *st_iotime;
- RRDDIM *rd_iotime_reads;
- RRDDIM *rd_iotime_writes;
-
RRDSET *st_ext_iotime;
RRDDIM *rd_iotime_discards;
RRDDIM *rd_iotime_flushes;
- RRDSET *st_await;
- RRDDIM *rd_await_reads;
- RRDDIM *rd_await_writes;
-
RRDSET *st_ext_await;
RRDDIM *rd_await_discards;
RRDDIM *rd_await_flushes;
- RRDSET *st_avgsz;
- RRDDIM *rd_avgsz_reads;
- RRDDIM *rd_avgsz_writes;
-
RRDSET *st_ext_avgsz;
RRDDIM *rd_avgsz_discards;
- RRDSET *st_svctm;
- RRDDIM *rd_svctm_svctm;
-
RRDSET *st_bcache_size;
RRDDIM *rd_bcache_dirty_size;
@@ -180,16 +160,16 @@ static struct disk {
#define rrdset_obsolete_and_pointer_null(st) do { if(st) { rrdset_is_obsolete___safe_from_collector_thread(st); (st) = NULL; } } while(st)
-static char *path_to_sys_dev_block_major_minor_string = NULL;
-static char *path_to_sys_block_device = NULL;
-static char *path_to_sys_block_device_bcache = NULL;
-static char *path_to_sys_devices_virtual_block_device = NULL;
-static char *path_to_device_mapper = NULL;
-static char *path_to_dev_disk = NULL;
-static char *path_to_sys_block = NULL;
-static char *path_to_device_label = NULL;
-static char *path_to_device_id = NULL;
-static char *path_to_veritas_volume_groups = NULL;
+static const char *path_to_sys_dev_block_major_minor_string = NULL;
+static const char *path_to_sys_block_device = NULL;
+static const char *path_to_sys_block_device_bcache = NULL;
+static const char *path_to_sys_devices_virtual_block_device = NULL;
+static const char *path_to_device_mapper = NULL;
+static const char *path_to_dev_disk = NULL;
+static const char *path_to_sys_block = NULL;
+static const char *path_to_device_label = NULL;
+static const char *path_to_device_id = NULL;
+static const char *path_to_veritas_volume_groups = NULL;
static int name_disks_by_id = CONFIG_BOOLEAN_NO;
static int global_bcache_priority_stats_update_every = 0; // disabled by default
@@ -998,7 +978,7 @@ static void disk_labels_cb(RRDSET *st, void *data) {
add_labels_to_disk(data, st);
}
-static int diskstats_function_block_devices(BUFFER *wb, const char *function __maybe_unused) {
+static int diskstats_function_block_devices(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
@@ -1049,21 +1029,21 @@ static int diskstats_function_block_devices(BUFFER *wb, const char *function __m
max_io = MAX(max_io, io_total);
}
// Backlog and Busy Time
- double busy_perc = rrddim_get_last_stored_value(d->rd_util_utilization, &max_busy_perc, 1);
- double busy_time = rrddim_get_last_stored_value(d->rd_busy_busy, &max_busy_time, 1);
+ double busy_perc = rrddim_get_last_stored_value(d->disk_util.rd_util, &max_busy_perc, 1);
+ double busy_time = rrddim_get_last_stored_value(d->disk_busy.rd_busy, &max_busy_time, 1);
double backlog_time = rrddim_get_last_stored_value(d->rd_backlog_backlog, &max_backlog_time, 1);
// IOPS
- double iops_reads = rrddim_get_last_stored_value(d->rd_ops_reads, &max_iops_reads, 1);
- double iops_writes = rrddim_get_last_stored_value(d->rd_ops_writes, &max_iops_writes, 1);
+ double iops_reads = rrddim_get_last_stored_value(d->disk_ops.rd_ops_reads, &max_iops_reads, 1);
+ double iops_writes = rrddim_get_last_stored_value(d->disk_ops.rd_ops_writes, &max_iops_writes, 1);
// IO Time
- double iops_time_reads = rrddim_get_last_stored_value(d->rd_iotime_reads, &max_iops_time_reads, 1);
- double iops_time_writes = rrddim_get_last_stored_value(d->rd_iotime_writes, &max_iops_time_writes, 1);
+ double iops_time_reads = rrddim_get_last_stored_value(d->disk_iotime.rd_reads_ms, &max_iops_time_reads, 1);
+ double iops_time_writes = rrddim_get_last_stored_value(d->disk_iotime.rd_writes_ms, &max_iops_time_writes, 1);
// Avg IO Time
- double iops_avg_time_read = rrddim_get_last_stored_value(d->rd_await_reads, &max_iops_avg_time_read, 1);
- double iops_avg_time_write = rrddim_get_last_stored_value(d->rd_await_writes, &max_iops_avg_time_write, 1);
+ double iops_avg_time_read = rrddim_get_last_stored_value(d->disk_await.rd_await_reads, &max_iops_avg_time_read, 1);
+ double iops_avg_time_write = rrddim_get_last_stored_value(d->disk_await.rd_await_writes, &max_iops_avg_time_write, 1);
// Avg IO Size
- double iops_avg_size_read = rrddim_get_last_stored_value(d->rd_avgsz_reads, &max_iops_avg_size_read, 1);
- double iops_avg_size_write = rrddim_get_last_stored_value(d->rd_avgsz_writes, &max_iops_avg_size_write, 1);
+ double iops_avg_size_read = rrddim_get_last_stored_value(d->disk_avgsz.rd_avgsz_reads, &max_iops_avg_size_read, 1);
+ double iops_avg_size_write = rrddim_get_last_stored_value(d->disk_avgsz.rd_avgsz_writes, &max_iops_avg_size_write, 1);
buffer_json_add_array_item_double(wb, io_reads);
@@ -1287,23 +1267,25 @@ static void diskstats_cleanup_disks() {
if (unlikely(global_cleanup_removed_disks && !d->updated)) {
struct disk *t = d;
- rrdset_obsolete_and_pointer_null(d->st_avgsz);
+ rrdset_obsolete_and_pointer_null(d->disk_io.st_io);
+ rrdset_obsolete_and_pointer_null(d->disk_ops.st_ops);
+ rrdset_obsolete_and_pointer_null(d->disk_qops.st_qops);
+ rrdset_obsolete_and_pointer_null(d->disk_util.st_util);
+ rrdset_obsolete_and_pointer_null(d->disk_busy.st_busy);
+ rrdset_obsolete_and_pointer_null(d->disk_iotime.st_iotime);
+ rrdset_obsolete_and_pointer_null(d->disk_await.st_await);
+ rrdset_obsolete_and_pointer_null(d->disk_svctm.st_svctm);
+
+ rrdset_obsolete_and_pointer_null(d->disk_avgsz.st_avgsz);
rrdset_obsolete_and_pointer_null(d->st_ext_avgsz);
- rrdset_obsolete_and_pointer_null(d->st_await);
rrdset_obsolete_and_pointer_null(d->st_ext_await);
rrdset_obsolete_and_pointer_null(d->st_backlog);
- rrdset_obsolete_and_pointer_null(d->st_busy);
rrdset_obsolete_and_pointer_null(d->disk_io.st_io);
rrdset_obsolete_and_pointer_null(d->st_ext_io);
- rrdset_obsolete_and_pointer_null(d->st_iotime);
rrdset_obsolete_and_pointer_null(d->st_ext_iotime);
rrdset_obsolete_and_pointer_null(d->st_mops);
rrdset_obsolete_and_pointer_null(d->st_ext_mops);
- rrdset_obsolete_and_pointer_null(d->st_ops);
rrdset_obsolete_and_pointer_null(d->st_ext_ops);
- rrdset_obsolete_and_pointer_null(d->st_qops);
- rrdset_obsolete_and_pointer_null(d->st_svctm);
- rrdset_obsolete_and_pointer_null(d->st_util);
rrdset_obsolete_and_pointer_null(d->st_bcache);
rrdset_obsolete_and_pointer_null(d->st_bcache_bypass);
rrdset_obsolete_and_pointer_null(d->st_bcache_rates);
@@ -1374,7 +1356,7 @@ int do_proc_diskstats(int update_every, usec_t dt) {
global_do_ext = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "extended operations for all disks", global_do_ext);
global_do_backlog = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "backlog for all disks", global_do_backlog);
global_do_bcache = config_get_boolean_ondemand(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache for all disks", global_do_bcache);
- global_bcache_priority_stats_update_every = (int)config_get_number(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every);
+ global_bcache_priority_stats_update_every = (int)config_get_duration_seconds(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "bcache priority stats update every", global_bcache_priority_stats_update_every);
global_cleanup_removed_disks = config_get_boolean(CONFIG_SECTION_PLUGIN_PROC_DISKSTATS, "remove charts of removed disks" , global_cleanup_removed_disks);
@@ -1421,7 +1403,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
SIMPLE_PATTERN_EXACT, true);
rrd_function_add_inline(localhost, NULL, "block-devices", 10,
- RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_DISKSTATS_HELP,
+ RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_VERSION_DEFAULT,
+ RRDFUNCTIONS_DISKSTATS_HELP,
"top", HTTP_ACCESS_ANONYMOUS_DATA,
diskstats_function_block_devices);
}
@@ -1453,18 +1436,17 @@ int do_proc_diskstats(int update_every, usec_t dt) {
char *disk;
unsigned long major = 0, minor = 0;
- collected_number reads = 0, mreads = 0, readsectors = 0, readms = 0,
- writes = 0, mwrites = 0, writesectors = 0, writems = 0,
+ collected_number rd_ios = 0, mreads = 0, readsectors = 0, readms = 0, wr_ios = 0, mwrites = 0, writesectors = 0, writems = 0,
queued_ios = 0, busy_ms = 0, backlog_ms = 0,
discards = 0, mdiscards = 0, discardsectors = 0, discardms = 0,
flushes = 0, flushms = 0;
- collected_number last_reads = 0, last_readsectors = 0, last_readms = 0,
- last_writes = 0, last_writesectors = 0, last_writems = 0,
- last_busy_ms = 0,
- last_discards = 0, last_discardsectors = 0, last_discardms = 0,
- last_flushes = 0, last_flushms = 0;
+ collected_number last_rd_ios = 0, last_readsectors = 0, last_readms = 0,
+ last_wr_ios = 0, last_writesectors = 0, last_writems = 0,
+ last_busy_ms = 0,
+ last_discards = 0, last_discardsectors = 0, last_discardms = 0,
+ last_flushes = 0, last_flushms = 0;
size_t words = procfile_linewords(ff, l);
if(unlikely(words < 14)) continue;
@@ -1475,8 +1457,8 @@ int do_proc_diskstats(int update_every, usec_t dt) {
// # of reads completed # of writes completed
// This is the total number of reads or writes completed successfully.
- reads = str2ull(procfile_lineword(ff, l, 3), NULL); // rd_ios
- writes = str2ull(procfile_lineword(ff, l, 7), NULL); // wr_ios
+ rd_ios = str2ull(procfile_lineword(ff, l, 3), NULL); // rd_ios
+ wr_ios = str2ull(procfile_lineword(ff, l, 7), NULL); // wr_ios
// # of reads merged # of writes merged
// Reads and writes which are adjacent to each other may be merged for
@@ -1615,33 +1597,15 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if (d->do_ops == CONFIG_BOOLEAN_YES || d->do_ops == CONFIG_BOOLEAN_AUTO) {
d->do_ops = CONFIG_BOOLEAN_YES;
- if(unlikely(!d->st_ops)) {
- d->st_ops = rrdset_create_localhost(
- "disk_ops"
- , d->chart_id
- , d->disk
- , family
- , "disk.ops"
- , "Disk Completed I/O Operations"
- , "operations/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_OPS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_ops, RRDSET_FLAG_DETAIL);
-
- d->rd_ops_reads = rrddim_add(d->st_ops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_ops_writes = rrddim_add(d->st_ops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_ops);
- }
+ last_rd_ios = d->disk_ops.rd_ops_reads ? d->disk_ops.rd_ops_reads->collector.last_collected_value : 0;
+ last_wr_ios = d->disk_ops.rd_ops_writes ? d->disk_ops.rd_ops_writes->collector.last_collected_value : 0;
- last_reads = rrddim_set_by_pointer(d->st_ops, d->rd_ops_reads, reads);
- last_writes = rrddim_set_by_pointer(d->st_ops, d->rd_ops_writes, writes);
- rrdset_done(d->st_ops);
+ common_disk_ops(&d->disk_ops,
+ d->chart_id,
+ d->disk, rd_ios, wr_ios,
+ update_every,
+ disk_labels_cb,
+ d);
}
if (do_dc_stats && d->do_ops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
@@ -1661,8 +1625,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_ext_ops, RRDSET_FLAG_DETAIL);
-
d->rd_ops_discards = rrddim_add(d->st_ext_ops, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
if (do_fl_stats)
d->rd_ops_flushes = rrddim_add(d->st_ext_ops, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1679,31 +1641,14 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if (d->do_qops == CONFIG_BOOLEAN_YES || d->do_qops == CONFIG_BOOLEAN_AUTO) {
d->do_qops = CONFIG_BOOLEAN_YES;
- if(unlikely(!d->st_qops)) {
- d->st_qops = rrdset_create_localhost(
- "disk_qops"
- , d->chart_id
- , d->disk
- , family
- , "disk.qops"
- , "Disk Current I/O Operations"
- , "operations"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_QOPS
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_qops, RRDSET_FLAG_DETAIL);
-
- d->rd_qops_operations = rrddim_add(d->st_qops, "operations", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_qops);
- }
-
- rrddim_set_by_pointer(d->st_qops, d->rd_qops_operations, queued_ios);
- rrdset_done(d->st_qops);
+ common_disk_qops(
+ &d->disk_qops,
+ d->chart_id,
+ d->disk,
+ queued_ios,
+ update_every,
+ disk_labels_cb,
+ d);
}
if (d->do_backlog == CONFIG_BOOLEAN_YES || d->do_backlog == CONFIG_BOOLEAN_AUTO) {
@@ -1725,8 +1670,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, RRDSET_TYPE_AREA
);
- rrdset_flag_set(d->st_backlog, RRDSET_FLAG_DETAIL);
-
d->rd_backlog_backlog = rrddim_add(d->st_backlog, "backlog", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
add_labels_to_disk(d, d->st_backlog);
@@ -1739,61 +1682,28 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if (d->do_util == CONFIG_BOOLEAN_YES || d->do_util == CONFIG_BOOLEAN_AUTO) {
d->do_util = CONFIG_BOOLEAN_YES;
- if(unlikely(!d->st_busy)) {
- d->st_busy = rrdset_create_localhost(
- "disk_busy"
- , d->chart_id
- , d->disk
- , family
- , "disk.busy"
- , "Disk Busy Time"
- , "milliseconds"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_BUSY
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(d->st_busy, RRDSET_FLAG_DETAIL);
-
- d->rd_busy_busy = rrddim_add(d->st_busy, "busy", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_busy);
- }
-
- last_busy_ms = rrddim_set_by_pointer(d->st_busy, d->rd_busy_busy, busy_ms);
- rrdset_done(d->st_busy);
-
- if(unlikely(!d->st_util)) {
- d->st_util = rrdset_create_localhost(
- "disk_util"
- , d->chart_id
- , d->disk
- , family
- , "disk.util"
- , "Disk Utilization Time"
- , "% of time working"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_UTIL
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(d->st_util, RRDSET_FLAG_DETAIL);
+ last_busy_ms = d->disk_busy.rd_busy ? d->disk_busy.rd_busy->collector.last_collected_value : 0;
- d->rd_util_utilization = rrddim_add(d->st_util, "utilization", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_util);
- }
+ common_disk_busy(&d->disk_busy,
+ d->chart_id,
+ d->disk,
+ busy_ms,
+ update_every,
+ disk_labels_cb,
+ d);
collected_number disk_utilization = (busy_ms - last_busy_ms) / (10 * update_every);
if (disk_utilization > 100)
disk_utilization = 100;
- rrddim_set_by_pointer(d->st_util, d->rd_util_utilization, disk_utilization);
- rrdset_done(d->st_util);
+ common_disk_util(&d->disk_util,
+ d->chart_id,
+ d->disk,
+ disk_utilization,
+ update_every,
+ disk_labels_cb,
+ d);
+
}
if (d->do_mops == CONFIG_BOOLEAN_YES || d->do_mops == CONFIG_BOOLEAN_AUTO) {
@@ -1815,8 +1725,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_mops, RRDSET_FLAG_DETAIL);
-
d->rd_mops_reads = rrddim_add(d->st_mops, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_mops_writes = rrddim_add(d->st_mops, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1847,8 +1755,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_ext_mops, RRDSET_FLAG_DETAIL);
-
d->rd_mops_discards = rrddim_add(d->st_ext_mops, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
add_labels_to_disk(d, d->st_ext_mops);
@@ -1861,33 +1767,18 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if (d->do_iotime == CONFIG_BOOLEAN_YES || d->do_iotime == CONFIG_BOOLEAN_AUTO) {
d->do_iotime = CONFIG_BOOLEAN_YES;
- if(unlikely(!d->st_iotime)) {
- d->st_iotime = rrdset_create_localhost(
- "disk_iotime"
- , d->chart_id
- , d->disk
- , family
- , "disk.iotime"
- , "Disk Total I/O Time"
- , "milliseconds/s"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_IOTIME
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_iotime, RRDSET_FLAG_DETAIL);
-
- d->rd_iotime_reads = rrddim_add(d->st_iotime, "reads", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->rd_iotime_writes = rrddim_add(d->st_iotime, "writes", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- add_labels_to_disk(d, d->st_iotime);
- }
-
- last_readms = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_reads, readms);
- last_writems = rrddim_set_by_pointer(d->st_iotime, d->rd_iotime_writes, writems);
- rrdset_done(d->st_iotime);
+ last_readms = d->disk_iotime.rd_reads_ms ? d->disk_iotime.rd_reads_ms->collector.last_collected_value : 0;
+ last_writems = d->disk_iotime.rd_writes_ms ? d->disk_iotime.rd_writes_ms->collector.last_collected_value : 0;
+
+ common_disk_iotime(
+ &d->disk_iotime,
+ d->chart_id,
+ d->disk,
+ readms,
+ writems,
+ update_every,
+ disk_labels_cb,
+ d);
}
if(do_dc_stats && d->do_iotime == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
@@ -1907,8 +1798,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_ext_iotime, RRDSET_FLAG_DETAIL);
-
d->rd_iotime_discards = rrddim_add(d->st_ext_iotime, "discards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
if (do_fl_stats)
d->rd_iotime_flushes = rrddim_add(d->st_ext_iotime, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1928,36 +1817,19 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if(likely(dt)) {
if ((d->do_iotime == CONFIG_BOOLEAN_YES || d->do_iotime == CONFIG_BOOLEAN_AUTO) &&
(d->do_ops == CONFIG_BOOLEAN_YES || d->do_ops == CONFIG_BOOLEAN_AUTO)) {
- if(unlikely(!d->st_await)) {
- d->st_await = rrdset_create_localhost(
- "disk_await"
- , d->chart_id
- , d->disk
- , family
- , "disk.await"
- , "Average Completed I/O Operation Time"
- , "milliseconds/operation"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_AWAIT
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_await, RRDSET_FLAG_DETAIL);
-
- d->rd_await_reads = rrddim_add(d->st_await, "reads", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- d->rd_await_writes = rrddim_add(d->st_await, "writes", NULL, -1, 1000, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_await);
- }
- double read_avg = (reads - last_reads) ? (double)(readms - last_readms) / (reads - last_reads) : 0;
- double write_avg = (writes - last_writes) ? (double)(writems - last_writems) / (writes - last_writes) : 0;
-
- rrddim_set_by_pointer(d->st_await, d->rd_await_reads, (collected_number)(read_avg * 1000));
- rrddim_set_by_pointer(d->st_await, d->rd_await_writes, (collected_number)(write_avg * 1000));
- rrdset_done(d->st_await);
+ double read_ms_avg = (rd_ios - last_rd_ios) ? (double)(readms - last_readms) / (rd_ios - last_rd_ios) : 0;
+ double write_ms_avg = (wr_ios - last_wr_ios) ? (double)(writems - last_writems) / (wr_ios - last_wr_ios) : 0;
+
+ common_disk_await(
+ &d->disk_await,
+ d->chart_id,
+ d->disk,
+ read_ms_avg,
+ write_ms_avg,
+ update_every,
+ disk_labels_cb,
+ d);
}
if (do_dc_stats && d->do_iotime == CONFIG_BOOLEAN_YES && d->do_ops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
@@ -1977,8 +1849,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_ext_await, RRDSET_FLAG_DETAIL);
-
d->rd_await_discards = rrddim_add(d->st_ext_await, "discards", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
if (do_fl_stats)
d->rd_await_flushes = rrddim_add(d->st_ext_await, "flushes", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
@@ -2001,33 +1871,19 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if ((d->do_io == CONFIG_BOOLEAN_YES || d->do_io == CONFIG_BOOLEAN_AUTO) &&
(d->do_ops == CONFIG_BOOLEAN_YES || d->do_ops == CONFIG_BOOLEAN_AUTO)) {
- if(unlikely(!d->st_avgsz)) {
- d->st_avgsz = rrdset_create_localhost(
- "disk_avgsz"
- , d->chart_id
- , d->disk
- , family
- , "disk.avgsz"
- , "Average Completed I/O Operation Bandwidth"
- , "KiB/operation"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_AVGSZ
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- rrdset_flag_set(d->st_avgsz, RRDSET_FLAG_DETAIL);
-
- d->rd_avgsz_reads = rrddim_add(d->st_avgsz, "reads", NULL, SECTOR_SIZE, 1024, RRD_ALGORITHM_ABSOLUTE);
- d->rd_avgsz_writes = rrddim_add(d->st_avgsz, "writes", NULL, SECTOR_SIZE * -1, 1024, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_avgsz);
- }
- rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_reads, (reads - last_reads) ? (readsectors - last_readsectors) / (reads - last_reads) : 0);
- rrddim_set_by_pointer(d->st_avgsz, d->rd_avgsz_writes, (writes - last_writes) ? (writesectors - last_writesectors) / (writes - last_writes) : 0);
- rrdset_done(d->st_avgsz);
+ kernel_uint_t avg_read_bytes = SECTOR_SIZE * ((rd_ios - last_rd_ios) ? (readsectors - last_readsectors) / (rd_ios - last_rd_ios) : 0);
+ kernel_uint_t avg_write_bytes = SECTOR_SIZE * ((wr_ios - last_wr_ios) ? (writesectors - last_writesectors) / (wr_ios - last_wr_ios) : 0);
+
+ common_disk_avgsz(
+ &d->disk_avgsz,
+ d->chart_id,
+ d->disk,
+ avg_read_bytes,
+ avg_write_bytes,
+ update_every,
+ disk_labels_cb,
+ d);
}
if(do_dc_stats && d->do_io == CONFIG_BOOLEAN_YES && d->do_ops == CONFIG_BOOLEAN_YES && d->do_ext != CONFIG_BOOLEAN_NO) {
@@ -2047,8 +1903,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, RRDSET_TYPE_AREA
);
- rrdset_flag_set(d->st_ext_avgsz, RRDSET_FLAG_DETAIL);
-
d->rd_avgsz_discards = rrddim_add(d->st_ext_avgsz, "discards", NULL, SECTOR_SIZE, 1024, RRD_ALGORITHM_ABSOLUTE);
add_labels_to_disk(d, d->st_ext_avgsz);
@@ -2063,36 +1917,20 @@ int do_proc_diskstats(int update_every, usec_t dt) {
if ((d->do_util == CONFIG_BOOLEAN_YES || d->do_util == CONFIG_BOOLEAN_AUTO) &&
(d->do_ops == CONFIG_BOOLEAN_YES || d->do_ops == CONFIG_BOOLEAN_AUTO)) {
- if(unlikely(!d->st_svctm)) {
- d->st_svctm = rrdset_create_localhost(
- "disk_svctm"
- , d->chart_id
- , d->disk
- , family
- , "disk.svctm"
- , "Average Service Time"
- , "milliseconds/operation"
- , PLUGIN_PROC_NAME
- , PLUGIN_PROC_MODULE_DISKSTATS_NAME
- , NETDATA_CHART_PRIO_DISK_SVCTM
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdset_flag_set(d->st_svctm, RRDSET_FLAG_DETAIL);
-
- d->rd_svctm_svctm = rrddim_add(d->st_svctm, "svctm", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
-
- add_labels_to_disk(d, d->st_svctm);
- }
double svctm_avg =
- ((reads - last_reads) + (writes - last_writes)) ?
- (double)(busy_ms - last_busy_ms) / ((reads - last_reads) + (writes - last_writes)) :
+ ((rd_ios - last_rd_ios) + (wr_ios - last_wr_ios)) ?
+ (double) (busy_ms - last_busy_ms) / ((rd_ios - last_rd_ios) + (wr_ios - last_wr_ios)) :
0;
- rrddim_set_by_pointer(d->st_svctm, d->rd_svctm_svctm, (collected_number)(svctm_avg * 1000));
- rrdset_done(d->st_svctm);
+ common_disk_svctm(
+ &d->disk_svctm,
+ d->chart_id,
+ d->disk,
+ svctm_avg,
+ update_every,
+ disk_labels_cb,
+ d);
}
}
@@ -2331,8 +2169,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_bcache, RRDSET_FLAG_DETAIL);
-
d->rd_bcache_hits = rrddim_add(d->st_bcache, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_bcache_misses = rrddim_add(d->st_bcache, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_bcache_miss_collisions = rrddim_add(d->st_bcache, "collisions", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2365,8 +2201,6 @@ int do_proc_diskstats(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_bcache_bypass, RRDSET_FLAG_DETAIL);
-
d->rd_bcache_bypass_hits = rrddim_add(d->st_bcache_bypass, "hits", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
d->rd_bcache_bypass_misses = rrddim_add(d->st_bcache_bypass, "misses", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/proc.plugin/proc_mdstat.c b/src/collectors/proc.plugin/proc_mdstat.c
index 3857d9ec4..47c4f0d2b 100644
--- a/src/collectors/proc.plugin/proc_mdstat.c
+++ b/src/collectors/proc.plugin/proc_mdstat.c
@@ -89,7 +89,7 @@ int do_proc_mdstat(int update_every, usec_t dt)
static int do_health = -1, do_nonredundant = -1, do_disks = -1, do_operations = -1, do_mismatch = -1,
do_mismatch_config = -1;
static int make_charts_obsolete = -1;
- static char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL;
+ static const char *mdstat_filename = NULL, *mismatch_cnt_filename = NULL;
static struct raid *raids = NULL;
static size_t raids_allocated = 0;
size_t raids_num = 0, raid_idx = 0, redundant_num = 0;
diff --git a/src/collectors/proc.plugin/proc_meminfo.c b/src/collectors/proc.plugin/proc_meminfo.c
index 781329b59..de3f42329 100644
--- a/src/collectors/proc.plugin/proc_meminfo.c
+++ b/src/collectors/proc.plugin/proc_meminfo.c
@@ -347,8 +347,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_mem_hwcorrupt, RRDSET_FLAG_DETAIL);
-
rd_corrupted = rrddim_add(st_mem_hwcorrupt, "HardwareCorrupted", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
@@ -376,8 +374,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, RRDSET_TYPE_AREA
);
- rrdset_flag_set(st_mem_committed, RRDSET_FLAG_DETAIL);
-
rd_committed = rrddim_add(st_mem_committed, "Committed_AS", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
@@ -404,7 +400,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_mem_writeback, RRDSET_FLAG_DETAIL);
rd_dirty = rrddim_add(st_mem_writeback, "Dirty", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_writeback = rrddim_add(st_mem_writeback, "Writeback", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
@@ -444,8 +439,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st_mem_kernel, RRDSET_FLAG_DETAIL);
-
rd_slab = rrddim_add(st_mem_kernel, "Slab", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_kernelstack = rrddim_add(st_mem_kernel, "KernelStack", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_pagetables = rrddim_add(st_mem_kernel, "PageTables", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
@@ -484,8 +477,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st_mem_slab, RRDSET_FLAG_DETAIL);
-
rd_reclaimable = rrddim_add(st_mem_slab, "reclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_unreclaimable = rrddim_add(st_mem_slab, "unreclaimable", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
@@ -518,8 +509,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st_mem_hugepages, RRDSET_FLAG_DETAIL);
-
rd_free = rrddim_add(st_mem_hugepages, "free", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_used = rrddim_add(st_mem_hugepages, "used", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_surp = rrddim_add(st_mem_hugepages, "surplus", NULL, Hugepagesize, 1024, RRD_ALGORITHM_ABSOLUTE);
@@ -555,8 +544,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st_mem_transparent_hugepages, RRDSET_FLAG_DETAIL);
-
rd_anonymous = rrddim_add(st_mem_transparent_hugepages, "anonymous", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_shared = rrddim_add(st_mem_transparent_hugepages, "shmem", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
}
@@ -585,8 +572,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_mem_thp_details, RRDSET_FLAG_DETAIL);
-
rd_shmem_pmd_mapped = rrddim_add(st_mem_thp_details, "shmem_pmd", "ShmemPmdMapped", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_file_huge_pages = rrddim_add(st_mem_thp_details, "file", "FileHugePages", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_file_pmd_mapped = rrddim_add(st_mem_thp_details, "file_pmd", "FilePmdMapped", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
@@ -622,8 +607,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_mem_reclaiming, RRDSET_FLAG_DETAIL);
-
rd_active = rrddim_add(st_mem_reclaiming, "active", "Active", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_inactive = rrddim_add(st_mem_reclaiming, "inactive", "Inactive", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_active_anon = rrddim_add(st_mem_reclaiming, "active_anon", "Active(anon)", 1, 1024, RRD_ALGORITHM_ABSOLUTE);
@@ -667,8 +650,6 @@ int do_proc_meminfo(int update_every, usec_t dt) {
, RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st_mem_high_low, RRDSET_FLAG_DETAIL);
-
rd_high_used = rrddim_add(st_mem_high_low, "high_used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_low_used = rrddim_add(st_mem_high_low, "low_used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
rd_high_free = rrddim_add(st_mem_high_low, "high_free", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
diff --git a/src/collectors/proc.plugin/proc_net_dev.c b/src/collectors/proc.plugin/proc_net_dev.c
index 41c10ddbc..3af59aed1 100644
--- a/src/collectors/proc.plugin/proc_net_dev.c
+++ b/src/collectors/proc.plugin/proc_net_dev.c
@@ -251,6 +251,8 @@ static struct netdev {
// ----------------------------------------------------------------------------
static void netdev_charts_release(struct netdev *d) {
+ rrdvar_chart_variable_release(d->st_bandwidth, d->chart_var_speed);
+
if(d->st_bandwidth) rrdset_is_obsolete___safe_from_collector_thread(d->st_bandwidth);
if(d->st_packets) rrdset_is_obsolete___safe_from_collector_thread(d->st_packets);
if(d->st_errors) rrdset_is_obsolete___safe_from_collector_thread(d->st_errors);
@@ -472,7 +474,7 @@ static void netdev_rename_this_device(struct netdev *d) {
// ----------------------------------------------------------------------------
-int netdev_function_net_interfaces(BUFFER *wb, const char *function __maybe_unused) {
+static int netdev_function_net_interfaces(BUFFER *wb, const char *function __maybe_unused, BUFFER *payload __maybe_unused, const char *source __maybe_unused) {
buffer_flush(wb);
wb->content_type = CT_APPLICATION_JSON;
buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
@@ -1279,8 +1281,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_speed, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_speed, d->chart_labels);
d->rd_speed = rrddim_add(d->st_speed, "speed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -1319,8 +1319,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_duplex, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_duplex, d->chart_labels);
d->rd_duplex_full = rrddim_add(d->st_duplex, "full", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -1351,8 +1349,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_operstate, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_operstate, d->chart_labels);
d->rd_operstate_up = rrddim_add(d->st_operstate, "up", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -1391,8 +1387,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_carrier, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_carrier, d->chart_labels);
d->rd_carrier_up = rrddim_add(d->st_carrier, "up", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -1421,8 +1415,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_mtu, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_mtu, d->chart_labels);
d->rd_mtu = rrddim_add(d->st_mtu, "mtu", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -1450,8 +1442,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_packets, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_packets, d->chart_labels);
d->rd_rpackets = rrddim_add(d->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1491,8 +1481,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_errors, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_errors, d->chart_labels);
d->rd_rerrors = rrddim_add(d->st_errors, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1530,8 +1518,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_drops, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_drops, d->chart_labels);
d->rd_rdrops = rrddim_add(d->st_drops, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1569,8 +1555,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_fifo, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_fifo, d->chart_labels);
d->rd_rfifo = rrddim_add(d->st_fifo, "receive", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1608,8 +1592,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_compressed, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_compressed, d->chart_labels);
d->rd_rcompressed = rrddim_add(d->st_compressed, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1647,8 +1629,6 @@ int do_proc_net_dev(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(d->st_events, RRDSET_FLAG_DETAIL);
-
rrdset_update_rrdlabels(d->st_events, d->chart_labels);
d->rd_rframe = rrddim_add(d->st_events, "frames", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1721,17 +1701,17 @@ void *netdev_main(void *ptr_is_null __maybe_unused)
virtual_device_collect_delay_secs = 300;
rrd_function_add_inline(localhost, NULL, "network-interfaces", 10,
- RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_NETDEV_HELP,
+ RRDFUNCTIONS_PRIORITY_DEFAULT, RRDFUNCTIONS_VERSION_DEFAULT,
+ RRDFUNCTIONS_NETDEV_HELP,
"top", HTTP_ACCESS_ANONYMOUS_DATA,
netdev_function_net_interfaces);
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC);
while (service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t hb_dt = heartbeat_next(&hb);
if (unlikely(!service_running(SERVICE_COLLECTORS)))
break;
diff --git a/src/collectors/proc.plugin/proc_net_netstat.c b/src/collectors/proc.plugin/proc_net_netstat.c
index da7a28fa7..6689a057f 100644
--- a/src/collectors/proc.plugin/proc_net_netstat.c
+++ b/src/collectors/proc.plugin/proc_net_netstat.c
@@ -465,7 +465,6 @@ static void do_proc_net_snmp6(int update_every) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_ok = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_failed = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -501,7 +500,6 @@ static void do_proc_net_snmp6(int update_every) {
, NETDATA_CHART_PRIO_IPV6_FRAGSIN
, update_every
, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_ok = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_failed = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -544,7 +542,6 @@ static void do_proc_net_snmp6(int update_every) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -624,7 +621,6 @@ static void do_proc_net_snmp6(int update_every) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -697,7 +693,6 @@ static void do_proc_net_snmp6(int update_every) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -735,7 +730,6 @@ static void do_proc_net_snmp6(int update_every) {
, update_every
, RRDSET_TYPE_AREA
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_Ip6InMcastOctets = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
rd_Ip6OutMcastOctets = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
@@ -767,7 +761,6 @@ static void do_proc_net_snmp6(int update_every) {
, update_every
, RRDSET_TYPE_AREA
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_Ip6InBcastOctets = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
rd_Ip6OutBcastOctets = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
@@ -799,7 +792,6 @@ static void do_proc_net_snmp6(int update_every) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_Ip6InMcastPkts = rrddim_add(st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_Ip6OutMcastPkts = rrddim_add(st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -1781,8 +1773,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, RRDSET_TYPE_AREA
);
- rrdset_flag_set(st_ip_mcast, RRDSET_FLAG_DETAIL);
-
rd_in = rrddim_add(st_ip_mcast, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
rd_out = rrddim_add(st_ip_mcast, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
@@ -1817,8 +1807,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, RRDSET_TYPE_AREA
);
- rrdset_flag_set(st_ip_bcast, RRDSET_FLAG_DETAIL);
-
rd_in = rrddim_add(st_ip_bcast, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
rd_out = rrddim_add(st_ip_bcast, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
}
@@ -1853,8 +1841,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_ip_mcastpkts, RRDSET_FLAG_DETAIL);
-
rd_in = rrddim_add(st_ip_mcastpkts, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_out = rrddim_add(st_ip_mcastpkts, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
@@ -1886,8 +1872,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_ip_bcastpkts, RRDSET_FLAG_DETAIL);
-
rd_in = rrddim_add(st_ip_bcastpkts, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_out = rrddim_add(st_ip_bcastpkts, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
}
@@ -1919,8 +1903,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_ecnpkts, RRDSET_FLAG_DETAIL);
-
rd_cep = rrddim_add(st_ecnpkts, "InCEPkts", "CEP", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_noectp = rrddim_add(st_ecnpkts, "InNoECTPkts", "NoECTP", -1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_ectp0 = rrddim_add(st_ecnpkts, "InECT0Pkts", "ECTP0", 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2241,7 +2223,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_FragOKs = rrddim_add(st, "FragOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_FragFails = rrddim_add(st, "FragFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2277,7 +2258,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_ReasmOKs = rrddim_add(st, "ReasmOKs", "ok", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_ReasmFails = rrddim_add(st, "ReasmFails", "failed", -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2319,7 +2299,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_InDiscards = rrddim_add(st, "InDiscards", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_OutDiscards = rrddim_add(st, "OutDiscards", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2596,7 +2575,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_InErrs = rrddim_add(st, "InErrs", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_InCsumErrors = rrddim_add(st, "InCsumErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2631,7 +2609,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_ActiveOpens = rrddim_add(st, "ActiveOpens", "active", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_PassiveOpens = rrddim_add(st, "PassiveOpens", "passive", 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2666,7 +2643,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_EstabResets = rrddim_add(st, "EstabResets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_OutRsts = rrddim_add(st, "OutRsts", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -2744,7 +2720,6 @@ int do_proc_net_netstat(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_RcvbufErrors = rrddim_add(st, "RcvbufErrors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_SndbufErrors = rrddim_add(st, "SndbufErrors", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/proc.plugin/proc_net_rpc_nfs.c b/src/collectors/proc.plugin/proc_net_rpc_nfs.c
index d6547636e..da3243bad 100644
--- a/src/collectors/proc.plugin/proc_net_rpc_nfs.c
+++ b/src/collectors/proc.plugin/proc_net_rpc_nfs.c
@@ -296,8 +296,6 @@ int do_proc_net_rpc_nfs(int update_every, usec_t dt) {
, RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
-
rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
@@ -332,7 +330,6 @@ int do_proc_net_rpc_nfs(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_retransmits = rrddim_add(st, "retransmits", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/proc.plugin/proc_net_rpc_nfsd.c b/src/collectors/proc.plugin/proc_net_rpc_nfsd.c
index 1d9127a03..82d74b6a9 100644
--- a/src/collectors/proc.plugin/proc_net_rpc_nfsd.c
+++ b/src/collectors/proc.plugin/proc_net_rpc_nfsd.c
@@ -501,7 +501,6 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_stale = rrddim_add(st, "stale", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
}
@@ -587,7 +586,6 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_STACKED
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_udp = rrddim_add(st, "udp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_tcp = rrddim_add(st, "tcp", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -623,7 +621,6 @@ int do_proc_net_rpc_nfsd(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_calls = rrddim_add(st, "calls", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_bad_format = rrddim_add(st, "bad_format", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/proc.plugin/proc_net_sctp_snmp.c b/src/collectors/proc.plugin/proc_net_sctp_snmp.c
index 4a3d5c912..1987ff7a1 100644
--- a/src/collectors/proc.plugin/proc_net_sctp_snmp.c
+++ b/src/collectors/proc.plugin/proc_net_sctp_snmp.c
@@ -214,7 +214,6 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_received = rrddim_add(st, "SctpInSCTPPacks", "received", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_sent = rrddim_add(st, "SctpOutSCTPPacks", "sent", -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -248,7 +247,6 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_invalid = rrddim_add(st, "SctpOutOfBlues", "invalid", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_csum = rrddim_add(st, "SctpChecksumErrors", "checksum", 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -282,7 +280,6 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) {
, NETDATA_CHART_PRIO_SCTP + 40
, update_every
, RRDSET_TYPE_LINE);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_reassembled = rrddim_add(st, "SctpReasmUsrMsgs", "reassembled", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_fragmented = rrddim_add(st, "SctpFragUsrMsgs", "fragmented", -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -321,7 +318,6 @@ int do_proc_net_sctp_snmp(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_InCtrl = rrddim_add(st, "SctpInCtrlChunks", "InCtrl", 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_InOrder = rrddim_add(st, "SctpInOrderChunks", "InOrder", 1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/proc.plugin/proc_net_sockstat.c b/src/collectors/proc.plugin/proc_net_sockstat.c
index da8682b51..185eb4e5a 100644
--- a/src/collectors/proc.plugin/proc_net_sockstat.c
+++ b/src/collectors/proc.plugin/proc_net_sockstat.c
@@ -128,7 +128,7 @@ int do_proc_net_sockstat(int update_every, usec_t dt) {
do_frag_sockets = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG sockets", CONFIG_BOOLEAN_AUTO);
do_frag_mem = config_get_boolean_ondemand("plugin:proc:/proc/net/sockstat", "ipv4 FRAG memory", CONFIG_BOOLEAN_AUTO);
- update_constants_every = config_get_number("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every);
+ update_constants_every = config_get_duration_seconds("plugin:proc:/proc/net/sockstat", "update constants every", update_constants_every);
update_constants_count = update_constants_every;
arl_sockets = arl_create("sockstat/sockets", arl_callback_str2kernel_uint_t, 60);
diff --git a/src/collectors/proc.plugin/proc_net_stat_conntrack.c b/src/collectors/proc.plugin/proc_net_stat_conntrack.c
index 6951cba79..cb1c8837d 100644
--- a/src/collectors/proc.plugin/proc_net_stat_conntrack.c
+++ b/src/collectors/proc.plugin/proc_net_stat_conntrack.c
@@ -11,7 +11,7 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) {
static int do_sockets = -1, do_new = -1, do_changes = -1, do_expect = -1, do_search = -1, do_errors = -1;
static usec_t get_max_every = 10 * USEC_PER_SEC, usec_since_last_max = 0;
static int read_full = 1;
- static char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename;
+ static const char *nf_conntrack_filename, *nf_conntrack_count_filename, *nf_conntrack_max_filename;
static const RRDVAR_ACQUIRED *rrdvar_max = NULL;
unsigned long long aentries = 0, asearched = 0, afound = 0, anew = 0, ainvalid = 0, aignore = 0, adelete = 0, adelete_list = 0,
@@ -217,7 +217,6 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_inserted = rrddim_add(st, "inserted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -253,7 +252,6 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_created = rrddim_add(st, "created", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_deleted = rrddim_add(st, "deleted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -289,7 +287,6 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_searched = rrddim_add(st, "searched", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_restarted = rrddim_add(st, "restarted", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -326,7 +323,6 @@ int do_proc_net_stat_conntrack(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st, RRDSET_FLAG_DETAIL);
rd_icmp_error = rrddim_add(st, "icmp_error", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_insert_failed = rrddim_add(st, "insert_failed", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/proc.plugin/proc_net_wireless.c b/src/collectors/proc.plugin/proc_net_wireless.c
index c7efa3335..f5556a942 100644
--- a/src/collectors/proc.plugin/proc_net_wireless.c
+++ b/src/collectors/proc.plugin/proc_net_wireless.c
@@ -208,7 +208,7 @@ int do_proc_net_wireless(int update_every, usec_t dt)
UNUSED(dt);
static procfile *ff = NULL;
static int do_status, do_quality = -1, do_discarded_packets, do_beacon;
- static char *proc_net_wireless_filename = NULL;
+ static const char *proc_net_wireless_filename = NULL;
if (unlikely(do_quality == -1)) {
char filename[FILENAME_MAX + 1];
@@ -264,8 +264,6 @@ int do_proc_net_wireless(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_flag_set(wireless_dev->st_status, RRDSET_FLAG_DETAIL);
-
wireless_dev->rd_status = rrddim_add(wireless_dev->st_status, "status", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
add_labels_to_wireless(wireless_dev, wireless_dev->st_status);
@@ -295,7 +293,6 @@ int do_proc_net_wireless(int update_every, usec_t dt)
NETDATA_CHART_PRIO_WIRELESS_IFACE + 1,
update_every,
RRDSET_TYPE_LINE);
- rrdset_flag_set(wireless_dev->st_link, RRDSET_FLAG_DETAIL);
wireless_dev->rd_link = rrddim_add(wireless_dev->st_link, "link_quality", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -316,7 +313,6 @@ int do_proc_net_wireless(int update_every, usec_t dt)
NETDATA_CHART_PRIO_WIRELESS_IFACE + 2,
update_every,
RRDSET_TYPE_LINE);
- rrdset_flag_set(wireless_dev->st_level, RRDSET_FLAG_DETAIL);
wireless_dev->rd_level = rrddim_add(wireless_dev->st_level, "signal_level", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -337,7 +333,6 @@ int do_proc_net_wireless(int update_every, usec_t dt)
NETDATA_CHART_PRIO_WIRELESS_IFACE + 3,
update_every,
RRDSET_TYPE_LINE);
- rrdset_flag_set(wireless_dev->st_noise, RRDSET_FLAG_DETAIL);
wireless_dev->rd_noise = rrddim_add(wireless_dev->st_noise, "noise_level", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
@@ -376,8 +371,6 @@ int do_proc_net_wireless(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_flag_set(wireless_dev->st_discarded_packets, RRDSET_FLAG_DETAIL);
-
wireless_dev->rd_nwid = rrddim_add(wireless_dev->st_discarded_packets, "nwid", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
wireless_dev->rd_crypt = rrddim_add(wireless_dev->st_discarded_packets, "crypt", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
wireless_dev->rd_frag = rrddim_add(wireless_dev->st_discarded_packets, "frag", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
@@ -414,8 +407,6 @@ int do_proc_net_wireless(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_flag_set(wireless_dev->st_missed_beacon, RRDSET_FLAG_DETAIL);
-
wireless_dev->rd_missed_beacon = rrddim_add(wireless_dev->st_missed_beacon, "missed_beacons", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
add_labels_to_wireless(wireless_dev, wireless_dev->st_missed_beacon);
diff --git a/src/collectors/proc.plugin/proc_pressure.c b/src/collectors/proc.plugin/proc_pressure.c
index 4037e60ac..c4d4bc2b1 100644
--- a/src/collectors/proc.plugin/proc_pressure.c
+++ b/src/collectors/proc.plugin/proc_pressure.c
@@ -158,7 +158,7 @@ int do_proc_pressure(int update_every, usec_t dt) {
int i;
static usec_t next_pressure_dt = 0;
- static char *base_path = NULL;
+ static const char *base_path = NULL;
update_every = (update_every < MIN_PRESSURE_UPDATE_EVERY) ? MIN_PRESSURE_UPDATE_EVERY : update_every;
pressure_update_every = update_every;
@@ -170,9 +170,8 @@ int do_proc_pressure(int update_every, usec_t dt) {
return 0;
}
- if (unlikely(!base_path)) {
+ if (unlikely(!base_path))
base_path = config_get(CONFIG_SECTION_PLUGIN_PROC_PRESSURE, "base path of pressure metrics", "/proc/pressure");
- }
for (i = 0; i < PRESSURE_NUM_RESOURCES; i++) {
procfile *ff = resource_info[i].pf;
diff --git a/src/collectors/proc.plugin/proc_spl_kstat_zfs.c b/src/collectors/proc.plugin/proc_spl_kstat_zfs.c
index 5a0f90951..9286479c9 100644
--- a/src/collectors/proc.plugin/proc_spl_kstat_zfs.c
+++ b/src/collectors/proc.plugin/proc_spl_kstat_zfs.c
@@ -18,7 +18,7 @@ int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
static int do_zfs_stats = 0;
static procfile *ff = NULL;
- static char *dirname = NULL;
+ static const char *dirname = NULL;
static ARL_BASE *arl_base = NULL;
arcstats.l2exist = -1;
diff --git a/src/collectors/proc.plugin/proc_stat.c b/src/collectors/proc.plugin/proc_stat.c
index c211ceee5..b36f6b867 100644
--- a/src/collectors/proc.plugin/proc_stat.c
+++ b/src/collectors/proc.plugin/proc_stat.c
@@ -293,7 +293,7 @@ static void* wake_cpu_thread(void* core) {
return 0;
}
-static int read_schedstat(char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) {
+static int read_schedstat(const char *schedstat_filename, struct per_core_cpuidle_chart **cpuidle_charts_address, size_t *schedstat_cores_found) {
static size_t cpuidle_charts_len = 0;
static procfile *ff = NULL;
struct per_core_cpuidle_chart *cpuidle_charts = *cpuidle_charts_address;
@@ -373,7 +373,7 @@ static int read_one_state(char *buf, const char *filename, int *fd) {
return 1;
}
-static int read_cpuidle_states(char *cpuidle_name_filename , char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) {
+static int read_cpuidle_states(const char *cpuidle_name_filename, const char *cpuidle_time_filename, struct per_core_cpuidle_chart *cpuidle_charts, size_t core) {
char filename[FILENAME_MAX + 1];
static char next_state_filename[FILENAME_MAX + 1];
struct stat stbuf;
@@ -484,7 +484,7 @@ int do_proc_stat(int update_every, usec_t dt) {
static int do_cpu = -1, do_cpu_cores = -1, do_interrupts = -1, do_context = -1, do_forks = -1, do_processes = -1,
do_core_throttle_count = -1, do_package_throttle_count = -1, do_cpu_freq = -1, do_cpuidle = -1;
static uint32_t hash_intr, hash_ctxt, hash_processes, hash_procs_running, hash_procs_blocked;
- static char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL,
+ static const char *core_throttle_count_filename = NULL, *package_throttle_count_filename = NULL, *scaling_cur_freq_filename = NULL,
*time_in_state_filename = NULL, *schedstat_filename = NULL, *cpuidle_name_filename = NULL, *cpuidle_time_filename = NULL;
static const RRDVAR_ACQUIRED *cpus_var = NULL;
static int accurate_freq_avail = 0, accurate_freq_is_used = 0;
@@ -794,7 +794,6 @@ int do_proc_stat(int update_every, usec_t dt) {
, update_every
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_forks, RRDSET_FLAG_DETAIL);
rd_started = rrddim_add(st_forks, "started", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
diff --git a/src/collectors/proc.plugin/proc_uptime.c b/src/collectors/proc.plugin/proc_uptime.c
index ddab7269b..7471171ed 100644
--- a/src/collectors/proc.plugin/proc_uptime.c
+++ b/src/collectors/proc.plugin/proc_uptime.c
@@ -5,7 +5,7 @@
int do_proc_uptime(int update_every, usec_t dt) {
(void)dt;
- static char *uptime_filename = NULL;
+ static const char *uptime_filename = NULL;
if(!uptime_filename) {
char filename[FILENAME_MAX + 1];
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/proc/uptime");
diff --git a/src/collectors/proc.plugin/proc_vmstat.c b/src/collectors/proc.plugin/proc_vmstat.c
index 050086689..e38e9b678 100644
--- a/src/collectors/proc.plugin/proc_vmstat.c
+++ b/src/collectors/proc.plugin/proc_vmstat.c
@@ -355,8 +355,6 @@ int do_proc_vmstat(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_oom_kill, RRDSET_FLAG_DETAIL);
-
rd_oom_kill = rrddim_add(st_oom_kill, "kills", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
}
@@ -397,8 +395,6 @@ int do_proc_vmstat(int update_every, usec_t dt) {
, RRDSET_TYPE_LINE
);
- rrdset_flag_set(st_numa, RRDSET_FLAG_DETAIL);
-
// These depend on CONFIG_NUMA in the kernel.
rd_local = rrddim_add(st_numa, "local", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
rd_foreign = rrddim_add(st_numa, "foreign", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
diff --git a/src/collectors/proc.plugin/sys_class_drm.c b/src/collectors/proc.plugin/sys_class_drm.c
index ab4d98a72..0622274a0 100644
--- a/src/collectors/proc.plugin/sys_class_drm.c
+++ b/src/collectors/proc.plugin/sys_class_drm.c
@@ -837,7 +837,7 @@ int do_sys_class_drm(int update_every, usec_t dt) {
if(unlikely(!drm_dir)) {
char filename[FILENAME_MAX + 1];
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/class/drm");
- char *drm_dir_name = config_get(CONFIG_SECTION_PLUGIN_PROC_DRM, "directory to monitor", filename);
+ const char *drm_dir_name = config_get(CONFIG_SECTION_PLUGIN_PROC_DRM, "directory to monitor", filename);
if(unlikely(NULL == (drm_dir = opendir(drm_dir_name)))){
collector_error("Cannot read directory '%s'", drm_dir_name);
return 1;
diff --git a/src/collectors/proc.plugin/sys_class_infiniband.c b/src/collectors/proc.plugin/sys_class_infiniband.c
index ff1652ddf..34a126c5e 100644
--- a/src/collectors/proc.plugin/sys_class_infiniband.c
+++ b/src/collectors/proc.plugin/sys_class_infiniband.c
@@ -302,7 +302,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt)
static int initialized = 0;
static int enable_new_ports = -1, enable_only_active = CONFIG_BOOLEAN_YES;
static int do_bytes = -1, do_packets = -1, do_errors = -1, do_hwpackets = -1, do_hwerrors = -1;
- static char *sys_class_infiniband_dirname = NULL;
+ static const char *sys_class_infiniband_dirname = NULL;
static long long int dt_to_refresh_ports = 0, last_refresh_ports_usec = 0;
@@ -332,7 +332,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt)
SIMPLE_PATTERN_EXACT, true);
dt_to_refresh_ports =
- config_get_number(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every seconds", 30) *
+ config_get_duration_seconds(CONFIG_SECTION_PLUGIN_SYS_CLASS_INFINIBAND, "refresh ports state every", 30) *
USEC_PER_SEC;
if (dt_to_refresh_ports < 0)
dt_to_refresh_ports = 0;
@@ -538,8 +538,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt)
port->priority + 1,
update_every,
RRDSET_TYPE_AREA);
- // Create Dimensions
- rrdset_flag_set(port->st_bytes, RRDSET_FLAG_DETAIL);
+
// On this chart, we want to have a KB/s so the dashboard will autoscale it
// The reported values are also per-lane, so we must multiply it by the width
// x4 lanes multiplier as per Documentation/ABI/stable/sysfs-class-infiniband
@@ -576,8 +575,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt)
port->priority + 2,
update_every,
RRDSET_TYPE_AREA);
- // Create Dimensions
- rrdset_flag_set(port->st_packets, RRDSET_FLAG_DETAIL);
+
FOREACH_COUNTER_PACKETS(GEN_RRD_DIM_ADD, port)
}
@@ -605,8 +603,7 @@ int do_sys_class_infiniband(int update_every, usec_t dt)
port->priority + 3,
update_every,
RRDSET_TYPE_LINE);
- // Create Dimensions
- rrdset_flag_set(port->st_errors, RRDSET_FLAG_DETAIL);
+
FOREACH_COUNTER_ERRORS(GEN_RRD_DIM_ADD, port)
}
@@ -641,8 +638,6 @@ int do_sys_class_infiniband(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_flag_set(port->st_hwerrors, RRDSET_FLAG_DETAIL);
-
// VENDORS: Set your selection
// VENDOR: Mellanox
@@ -677,8 +672,6 @@ int do_sys_class_infiniband(int update_every, usec_t dt)
update_every,
RRDSET_TYPE_LINE);
- rrdset_flag_set(port->st_hwpackets, RRDSET_FLAG_DETAIL);
-
// VENDORS: Set your selection
// VENDOR: Mellanox
diff --git a/src/collectors/proc.plugin/sys_class_power_supply.c b/src/collectors/proc.plugin/sys_class_power_supply.c
index c6be72679..7e4dda777 100644
--- a/src/collectors/proc.plugin/sys_class_power_supply.c
+++ b/src/collectors/proc.plugin/sys_class_power_supply.c
@@ -199,7 +199,7 @@ int do_sys_class_power_supply(int update_every, usec_t dt) {
(void)dt;
static int do_capacity = -1, do_power = -1, do_property[3] = {-1};
static int keep_fds_open = CONFIG_BOOLEAN_NO, keep_fds_open_config = -1;
- static char *dirname = NULL;
+ static const char *dirname = NULL;
if(unlikely(do_capacity == -1)) {
do_capacity = config_get_boolean("plugin:proc:/sys/class/power_supply", "battery capacity", CONFIG_BOOLEAN_YES);
diff --git a/src/collectors/proc.plugin/sys_devices_pci_aer.c b/src/collectors/proc.plugin/sys_devices_pci_aer.c
index 563ebf051..c39795ea1 100644
--- a/src/collectors/proc.plugin/sys_devices_pci_aer.c
+++ b/src/collectors/proc.plugin/sys_devices_pci_aer.c
@@ -2,7 +2,7 @@
#include "plugin_proc.h"
-static char *pci_aer_dirname = NULL;
+static const char *pci_aer_dirname = NULL;
typedef enum __attribute__((packed)) {
AER_DEV_NONFATAL = (1 << 0),
diff --git a/src/collectors/proc.plugin/sys_devices_system_edac_mc.c b/src/collectors/proc.plugin/sys_devices_system_edac_mc.c
index d3db8c044..93ee235cf 100644
--- a/src/collectors/proc.plugin/sys_devices_system_edac_mc.c
+++ b/src/collectors/proc.plugin/sys_devices_system_edac_mc.c
@@ -37,7 +37,7 @@ struct mc {
};
static struct mc *mc_root = NULL;
-static char *mc_dirname = NULL;
+static const char *mc_dirname = NULL;
static void find_all_mc() {
char name[FILENAME_MAX + 1];
diff --git a/src/collectors/proc.plugin/sys_devices_system_node.c b/src/collectors/proc.plugin/sys_devices_system_node.c
index 12f31a04e..9bc3703fa 100644
--- a/src/collectors/proc.plugin/sys_devices_system_node.c
+++ b/src/collectors/proc.plugin/sys_devices_system_node.c
@@ -4,22 +4,35 @@
struct node {
char *name;
- char *numastat_filename;
- procfile *numastat_ff;
- RRDSET *numastat_st;
+
+ struct {
+ char *filename;
+ procfile *ff;
+ RRDSET *st;
+ } numastat;
+
+ struct {
+ char *filename;
+ procfile *ff;
+ RRDSET *st_mem_usage;
+ RRDSET *st_mem_activity;
+ } meminfo;
+
struct node *next;
};
static struct node *numa_root = NULL;
+static int numa_node_count = 0;
static int find_all_nodes() {
int numa_node_count = 0;
char name[FILENAME_MAX + 1];
snprintfz(name, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/devices/system/node");
- char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
+ const char *dirname = config_get("plugin:proc:/sys/devices/system/node", "directory to monitor", name);
DIR *dir = opendir(dirname);
if(!dir) {
- collector_error("Cannot read NUMA node directory '%s'", dirname);
+ nd_log(
+ NDLS_COLLECTORS, errno == ENOENT ? NDLP_INFO : NDLP_ERR, "Cannot read NUMA node directory '%s'", dirname);
return 0;
}
@@ -47,8 +60,16 @@ static int find_all_nodes() {
freez(m);
continue;
}
+ m->numastat.filename = strdupz(name);
- m->numastat_filename = strdupz(name);
+ snprintfz(name, FILENAME_MAX, "%s/%s/meminfo", dirname, de->d_name);
+ if(stat(name, &st) == -1) {
+ freez(m->numastat.filename);
+ freez(m->name);
+ freez(m);
+ continue;
+ }
+ m->meminfo.filename = strdupz(name);
m->next = numa_root;
numa_root = m;
@@ -59,22 +80,12 @@ static int find_all_nodes() {
return numa_node_count;
}
-int do_proc_sys_devices_system_node(int update_every, usec_t dt) {
- (void)dt;
-
+static void do_muma_numastat(struct node *m, int update_every) {
static uint32_t hash_local_node = 0, hash_numa_foreign = 0, hash_interleave_hit = 0, hash_other_node = 0, hash_numa_hit = 0, hash_numa_miss = 0;
- static int do_numastat = -1, numa_node_count = 0;
- struct node *m;
-
- if(unlikely(numa_root == NULL)) {
- numa_node_count = find_all_nodes();
- if(unlikely(numa_root == NULL))
- return 1;
- }
-
- if(unlikely(do_numastat == -1)) {
- do_numastat = config_get_boolean_ondemand("plugin:proc:/sys/devices/system/node", "enable per-node numa metrics", CONFIG_BOOLEAN_AUTO);
+ static bool initialized = false;
+ if(unlikely(!initialized)) {
+ initialized = true;
hash_local_node = simple_hash("local_node");
hash_numa_foreign = simple_hash("numa_foreign");
hash_interleave_hit = simple_hash("interleave_hit");
@@ -83,82 +94,205 @@ int do_proc_sys_devices_system_node(int update_every, usec_t dt) {
hash_numa_miss = simple_hash("numa_miss");
}
- if (do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && numa_node_count >= 2)) {
- for(m = numa_root; m; m = m->next) {
- if(m->numastat_filename) {
-
- if(unlikely(!m->numastat_ff)) {
- m->numastat_ff = procfile_open(m->numastat_filename, " ", PROCFILE_FLAG_DEFAULT);
-
- if(unlikely(!m->numastat_ff))
- continue;
- }
-
- m->numastat_ff = procfile_readall(m->numastat_ff);
- if(unlikely(!m->numastat_ff || procfile_lines(m->numastat_ff) < 1 || procfile_linewords(m->numastat_ff, 0) < 1))
- continue;
-
- if(unlikely(!m->numastat_st)) {
- m->numastat_st = rrdset_create_localhost(
- "mem"
- , m->name
- , NULL
- , "numa"
- , "mem.numa_nodes"
- , "NUMA events"
- , "events/s"
- , PLUGIN_PROC_NAME
- , "/sys/devices/system/node"
- , NETDATA_CHART_PRIO_MEM_NUMA_NODES
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rrdlabels_add(m->numastat_st->rrdlabels, "numa_node", m->name, RRDLABEL_SRC_AUTO);
-
- rrdset_flag_set(m->numastat_st, RRDSET_FLAG_DETAIL);
-
- rrddim_add(m->numastat_st, "numa_hit", "hit", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "numa_miss", "miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "local_node", "local", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "numa_foreign", "foreign", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "interleave_hit", "interleave", 1, 1, RRD_ALGORITHM_INCREMENTAL);
- rrddim_add(m->numastat_st, "other_node", "other", 1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- }
-
- size_t lines = procfile_lines(m->numastat_ff), l;
- for(l = 0; l < lines; l++) {
- size_t words = procfile_linewords(m->numastat_ff, l);
-
- if(unlikely(words < 2)) {
- if(unlikely(words))
- collector_error("Cannot read %s numastat line %zu. Expected 2 params, read %zu.", m->name, l, words);
- continue;
- }
-
- char *name = procfile_lineword(m->numastat_ff, l, 0);
- char *value = procfile_lineword(m->numastat_ff, l, 1);
-
- if (unlikely(!name || !*name || !value || !*value))
- continue;
-
- uint32_t hash = simple_hash(name);
- if(likely(
- (hash == hash_numa_hit && !strcmp(name, "numa_hit"))
- || (hash == hash_numa_miss && !strcmp(name, "numa_miss"))
- || (hash == hash_local_node && !strcmp(name, "local_node"))
- || (hash == hash_numa_foreign && !strcmp(name, "numa_foreign"))
- || (hash == hash_interleave_hit && !strcmp(name, "interleave_hit"))
- || (hash == hash_other_node && !strcmp(name, "other_node"))
- ))
- rrddim_set(m->numastat_st, name, (collected_number)str2kernel_uint_t(value));
- }
-
- rrdset_done(m->numastat_st);
+ if (m->numastat.filename) {
+ if(unlikely(!m->numastat.ff)) {
+ m->numastat.ff = procfile_open(m->numastat.filename, " ", PROCFILE_FLAG_DEFAULT);
+
+ if(unlikely(!m->numastat.ff))
+ return;
+ }
+
+ m->numastat.ff = procfile_readall(m->numastat.ff);
+ if(unlikely(!m->numastat.ff || procfile_lines(m->numastat.ff) < 1 || procfile_linewords(m->numastat.ff, 0) < 1))
+ return;
+
+ if(unlikely(!m->numastat.st)) {
+ m->numastat.st = rrdset_create_localhost(
+ "numa_node_stat"
+ , m->name
+ , NULL
+ , "numa"
+ , "mem.numa_node_stat"
+ , "NUMA Node Memory Allocation Events"
+ , "events/s"
+ , PLUGIN_PROC_NAME
+ , "/sys/devices/system/node"
+ , NETDATA_CHART_PRIO_MEM_NUMA_NODES_NUMASTAT
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rrdlabels_add(m->numastat.st->rrdlabels, "numa_node", m->name, RRDLABEL_SRC_AUTO);
+
+ rrddim_add(m->numastat.st, "numa_hit", "hit", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat.st, "numa_miss", "miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat.st, "local_node", "local", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat.st, "numa_foreign", "foreign", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat.st, "interleave_hit", "interleave", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrddim_add(m->numastat.st, "other_node", "other", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ }
+
+ size_t lines = procfile_lines(m->numastat.ff), l;
+ for(l = 0; l < lines; l++) {
+ size_t words = procfile_linewords(m->numastat.ff, l);
+
+ if(unlikely(words < 2)) {
+ if(unlikely(words))
+ collector_error("Cannot read %s line %zu. Expected 2 params, read %zu.", m->numastat.filename, l, words);
+ continue;
+ }
+
+ char *name = procfile_lineword(m->numastat.ff, l, 0);
+ char *value = procfile_lineword(m->numastat.ff, l, 1);
+
+ if (unlikely(!name || !*name || !value || !*value))
+ continue;
+
+ uint32_t hash = simple_hash(name);
+
+ if ((hash == hash_numa_hit && !strcmp(name, "numa_hit")) ||
+ (hash == hash_numa_miss && !strcmp(name, "numa_miss")) ||
+ (hash == hash_local_node && !strcmp(name, "local_node")) ||
+ (hash == hash_numa_foreign && !strcmp(name, "numa_foreign")) ||
+ (hash == hash_interleave_hit && !strcmp(name, "interleave_hit")) ||
+ (hash == hash_other_node && !strcmp(name, "other_node"))) {
+ rrddim_set(m->numastat.st, name, (collected_number)str2kernel_uint_t(value));
+ }
+ }
+
+ rrdset_done(m->numastat.st);
+ }
+}
+
+static void do_numa_meminfo(struct node *m, int update_every) {
+ static uint32_t hash_MemFree = 0, hash_MemUsed = 0, hash_ActiveAnon = 0, hash_InactiveAnon = 0, hash_ActiveFile = 0,
+ hash_InactiveFile = 0;
+ static bool initialized = false;
+
+ if (unlikely(!initialized)) {
+ initialized = true;
+ hash_MemFree = simple_hash("MemFree");
+ hash_MemUsed = simple_hash("MemUsed");
+ hash_ActiveAnon = simple_hash("Active(anon)");
+ hash_InactiveAnon = simple_hash("Inactive(anon)");
+ hash_ActiveFile = simple_hash("Active(file)");
+ hash_InactiveFile = simple_hash("Inactive(file)");
+ }
+
+ if (m->meminfo.filename) {
+ if (unlikely(!m->meminfo.ff)) {
+ m->meminfo.ff = procfile_open(m->meminfo.filename, " :", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!m->meminfo.ff))
+ return;
+ }
+
+ m->meminfo.ff = procfile_readall(m->meminfo.ff);
+ if (unlikely(!m->meminfo.ff || procfile_lines(m->meminfo.ff) < 1 || procfile_linewords(m->meminfo.ff, 0) < 1))
+ return;
+
+ if (unlikely(!m->meminfo.st_mem_usage)) {
+ m->meminfo.st_mem_usage = rrdset_create_localhost(
+ "numa_node_mem_usage",
+ m->name,
+ NULL,
+ "numa",
+ "mem.numa_node_mem_usage",
+ "NUMA Node Memory Usage",
+ "bytes",
+ PLUGIN_PROC_NAME,
+ "/sys/devices/system/node",
+ NETDATA_CHART_PRIO_MEM_NUMA_NODES_MEMINFO,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ rrdlabels_add(m->meminfo.st_mem_usage->rrdlabels, "numa_node", m->name, RRDLABEL_SRC_AUTO);
+
+ rrddim_add(m->meminfo.st_mem_usage, "MemFree", "free", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(m->meminfo.st_mem_usage, "MemUsed", "used", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+ if (unlikely(!m->meminfo.st_mem_activity)) {
+ m->meminfo.st_mem_activity = rrdset_create_localhost(
+ "numa_node_mem_activity",
+ m->name,
+ NULL,
+ "numa",
+ "mem.numa_node_mem_activity",
+ "NUMA Node Memory Activity",
+ "bytes",
+ PLUGIN_PROC_NAME,
+ "/sys/devices/system/node",
+ NETDATA_CHART_PRIO_MEM_NUMA_NODES_ACTIVITY,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ rrdlabels_add(m->meminfo.st_mem_activity->rrdlabels, "numa_node", m->name, RRDLABEL_SRC_AUTO);
+
+ rrddim_add(m->meminfo.st_mem_activity, "Active(anon)", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(m->meminfo.st_mem_activity, "Inactive(anon)", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(m->meminfo.st_mem_activity, "Active(file)", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrddim_add(m->meminfo.st_mem_activity, "Inactive(file)", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ size_t lines = procfile_lines(m->meminfo.ff), l;
+ for (l = 0; l < lines; l++) {
+ size_t words = procfile_linewords(m->meminfo.ff, l);
+
+ if (unlikely(words < 4)) {
+ if (words)
+ collector_error(
+ "Cannot read %s line %zu. Expected 4 params, read %zu.", m->meminfo.filename, l, words);
+ continue;
}
+
+ char *name = procfile_lineword(m->meminfo.ff, l, 2);
+ char *value = procfile_lineword(m->meminfo.ff, l, 3);
+
+ if (unlikely(!name || !*name || !value || !*value))
+ continue;
+
+ uint32_t hash = simple_hash(name);
+
+ if ((hash == hash_MemFree && !strcmp(name, "MemFree")) ||
+ (hash == hash_MemUsed && !strcmp(name, "MemUsed"))) {
+ rrddim_set(m->meminfo.st_mem_usage, name, (collected_number)str2kernel_uint_t(value) * 1024);
+ } else if (
+ (hash == hash_ActiveAnon && !strcmp(name, "Active(anon)")) ||
+ (hash == hash_InactiveAnon && !strcmp(name, "Inactive(anon)")) ||
+ (hash == hash_ActiveFile && !strcmp(name, "Active(file)")) ||
+ (hash == hash_InactiveFile && !strcmp(name, "Inactive(file)"))) {
+ rrddim_set(m->meminfo.st_mem_activity, name, (collected_number)str2kernel_uint_t(value) * 1024);
+ }
+ }
+ rrdset_done(m->meminfo.st_mem_usage);
+ rrdset_done(m->meminfo.st_mem_activity);
+ }
+}
+
+int do_proc_sys_devices_system_node(int update_every, usec_t dt) {
+ (void)dt;
+ struct node *m;
+
+ static int do_numastat = -1;
+
+ if(unlikely(do_numastat == -1)) {
+ do_numastat = config_get_boolean_ondemand(
+ "plugin:proc:/sys/devices/system/node", "enable per-node numa metrics", CONFIG_BOOLEAN_AUTO);
+ }
+
+ if(unlikely(numa_root == NULL)) {
+ numa_node_count = find_all_nodes();
+ if(unlikely(numa_root == NULL))
+ return 1;
+ }
+
+ if (do_numastat == CONFIG_BOOLEAN_YES || (do_numastat == CONFIG_BOOLEAN_AUTO && numa_node_count >= 2)) {
+ for (m = numa_root; m; m = m->next) {
+ do_muma_numastat(m, update_every);
+ do_numa_meminfo(m, update_every);
}
+ return 0;
}
- return 0;
+ return 1;
}
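For context on the two parsers introduced above: `node<N>/numastat` exposes one `<counter> <value>` pair per line, which is why that parser reads words 0 and 1, while each `node<N>/meminfo` line has the form `Node <N> <Name>: <value> kB`, which is why the meminfo parser takes word 2 as the metric name, word 3 as the value, and multiplies by 1024 to report bytes. A typical excerpt of both files is shown below (the values are illustrative only):

```text
# /sys/devices/system/node/node0/numastat
numa_hit 1285476
numa_miss 0
local_node 1285476
other_node 0

# /sys/devices/system/node/node0/meminfo
Node 0 MemFree:         1352084 kB
Node 0 MemUsed:        14957520 kB
Node 0 Active(anon):    5127480 kB
Node 0 Inactive(file):  3401120 kB
```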
diff --git a/src/collectors/proc.plugin/sys_fs_btrfs.c b/src/collectors/proc.plugin/sys_fs_btrfs.c
index bf9b002bc..f51461146 100644
--- a/src/collectors/proc.plugin/sys_fs_btrfs.c
+++ b/src/collectors/proc.plugin/sys_fs_btrfs.c
@@ -270,8 +270,8 @@ static inline int find_btrfs_disks(BTRFS_NODE *node, const char *path) {
DIR *dir = opendir(path);
if (!dir) {
- if(!node->logged_error) {
- collector_error("BTRFS: Cannot open directory '%s'.", path);
+ if (!node->logged_error) {
+ nd_log(NDLS_COLLECTORS, errno == ENOENT ? NDLP_INFO : NDLP_ERR, "BTRFS: Cannot open directory '%s'.", path);
node->logged_error = 1;
}
return 1;
@@ -374,8 +374,8 @@ static inline int find_btrfs_devices(BTRFS_NODE *node, const char *path) {
DIR *dir = opendir(path);
if (!dir) {
- if(!node->logged_error) {
- collector_error("BTRFS: Cannot open directory '%s'.", path);
+ if (!node->logged_error) {
+ nd_log(NDLS_COLLECTORS, errno == ENOENT ? NDLP_INFO : NDLP_ERR, "BTRFS: Cannot open directory '%s'.", path);
node->logged_error = 1;
}
return 1;
@@ -474,8 +474,8 @@ static inline int find_all_btrfs_pools(const char *path, int update_every) {
DIR *dir = opendir(path);
if (!dir) {
- if(!logged_error) {
- collector_error("BTRFS: Cannot open directory '%s'.", path);
+ if (!logged_error) {
+ nd_log(NDLS_COLLECTORS, errno == ENOENT ? NDLP_INFO : NDLP_ERR, "BTRFS: Cannot open directory '%s'.", path);
logged_error = 1;
}
return 1;
@@ -678,7 +678,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
, do_error_stats = CONFIG_BOOLEAN_AUTO;
static usec_t refresh_delta = 0, refresh_every = 60 * USEC_PER_SEC;
- static char *btrfs_path = NULL;
+ static const char *btrfs_path = NULL;
(void)dt;
@@ -689,7 +689,7 @@ int do_sys_fs_btrfs(int update_every, usec_t dt) {
snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, "/sys/fs/btrfs");
btrfs_path = config_get("plugin:proc:/sys/fs/btrfs", "path to monitor", filename);
- refresh_every = config_get_number("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC;
+ refresh_every = config_get_duration_seconds("plugin:proc:/sys/fs/btrfs", "check for btrfs changes every", refresh_every / USEC_PER_SEC) * USEC_PER_SEC;
refresh_delta = refresh_every;
do_allocation_disks = config_get_boolean_ondemand("plugin:proc:/sys/fs/btrfs", "physical disks allocation", do_allocation_disks);
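The logging changes in this file, and in `sys_devices_system_node.c` above, follow the same idea: a missing directory (`ENOENT`) typically just means the subsystem is absent on this kernel (no BTRFS filesystems, no NUMA nodes), so it is logged as informational rather than as an error, while any other `opendir()` failure keeps error priority. A minimal sketch of the pattern, assembled only from these hunks:

```c
DIR *dir = opendir(path);
if (!dir) {
    // ENOENT: the subsystem is simply not present on this kernel - log as info.
    // Anything else (permissions, I/O errors, ...) is still logged as an error.
    nd_log(NDLS_COLLECTORS, errno == ENOENT ? NDLP_INFO : NDLP_ERR,
           "Cannot open directory '%s'.", path);
    return 1;
}
```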
diff --git a/src/collectors/profile.plugin/README.md b/src/collectors/profile.plugin/README.md
index 7e3681208..992e6de99 100644
--- a/src/collectors/profile.plugin/README.md
+++ b/src/collectors/profile.plugin/README.md
@@ -4,25 +4,25 @@ This plugin allows someone to backfill an agent with random data.
A user can specify:
- - The number charts they want,
- - the number of dimensions per chart,
- - the desire update every collection frequency,
- - the number of seconds to backfill.
- - the number of collection threads.
+- The number of charts they want,
+- the number of dimensions per chart,
+- the desired `update every` collection frequency,
+- the number of seconds to backfill,
+- the number of collection threads.
## Configuration
-Edit the `netdata.conf` configuration file using [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf) from the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory), which is typically at `/etc/netdata`.
+Edit the `netdata.conf` configuration file using [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) from the [Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory), which is typically at `/etc/netdata`.
Scroll down to the `[plugin:profile]` section to find the available options:
-```
+```text
[plugin:profile]
- update every = 5
- number of charts = 200
- number of dimensions per chart = 5
- seconds to backfill = 86400
- number of threads = 16
+ update every = 5
+ number of charts = 200
+ number of dimensions per chart = 5
+ seconds to backfill = 86400
+ number of threads = 16
```
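As a rough sense of scale, using only the defaults shown above: 200 charts × 5 dimensions × (86400 s ÷ 5 s per collection) = 17,280,000 backfilled points per run, spread across the configured collection threads.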
The `number of threads` option will create the specified number of collection
diff --git a/src/collectors/profile.plugin/plugin_profile.cc b/src/collectors/profile.plugin/plugin_profile.cc
index 390bca29e..14de55db1 100644
--- a/src/collectors/profile.plugin/plugin_profile.cc
+++ b/src/collectors/profile.plugin/plugin_profile.cc
@@ -117,7 +117,7 @@ public:
worker_register_job_custom_metric(WORKER_JOB_METRIC_POINTS_BACKFILLED, "points backfilled", "points", WORKER_METRIC_ABSOLUTE);
heartbeat_t HB;
- heartbeat_init(&HB);
+ heartbeat_init(&HB, UpdateEvery * USEC_PER_SEC);
worker_is_busy(WORKER_JOB_CREATE_CHARTS);
create();
@@ -157,7 +157,7 @@ public:
if (CollectionTV.tv_sec >= NowTV.tv_sec) {
worker_is_idle();
- heartbeat_next(&HB, UpdateEvery * USEC_PER_SEC);
+ heartbeat_next(&HB);
}
}
}
@@ -194,9 +194,11 @@ static void profile_main_cleanup(void *pptr) {
extern "C" void *profile_main(void *ptr) {
CLEANUP_FUNCTION_REGISTER(profile_main_cleanup) cleanup_ptr = ptr;
- int UpdateEvery = (int) config_get_number(CONFIG_SECTION_PROFILE, "update every", 1);
- if (UpdateEvery < localhost->rrd_update_every)
+ int UpdateEvery = (int) config_get_duration_seconds(CONFIG_SECTION_PROFILE, "update every", 1);
+ if (UpdateEvery < localhost->rrd_update_every) {
UpdateEvery = localhost->rrd_update_every;
+ config_set_duration_seconds(CONFIG_SECTION_PROFILE, "update every", UpdateEvery);
+ }
// pick low default values, in case this plugin is ever enabled accidentally.
size_t NumThreads = config_get_number(CONFIG_SECTION_PROFILE, "number of threads", 2);
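The two heartbeat hunks above reflect an API change in which the tick interval moves from `heartbeat_next()` to `heartbeat_init()`. A minimal sketch of the resulting usage pattern, based only on these hunks (the surrounding collection loop is elided):

```c
heartbeat_t HB;
heartbeat_init(&HB, UpdateEvery * USEC_PER_SEC);  // interval is now fixed once, at init time

for (;;) {                 // loop condition elided
    worker_is_idle();
    heartbeat_next(&HB);   // no per-call interval argument anymore
    /* ... perform one collection/backfill iteration ... */
}
```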
diff --git a/src/collectors/python.d.plugin/README.md b/src/collectors/python.d.plugin/README.md
index 299cebc03..f8d4184e8 100644
--- a/src/collectors/python.d.plugin/README.md
+++ b/src/collectors/python.d.plugin/README.md
@@ -1,22 +1,13 @@
-<!--
-title: "python.d.plugin"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/README.md"
-sidebar_label: "python.d.plugin"
-learn_status: "Published"
-learn_topic_type: "Tasks"
-learn_rel_path: "Developers/External plugins/python.d.plugin"
--->
-
# python.d.plugin
`python.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `python`.
-1. It runs as an independent process `ps fax` shows it
-2. It is started and stopped automatically by Netdata
-3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
-4. Supports any number of data collection **modules**
-5. Allows each **module** to have one or more data collection **jobs**
-6. Each **job** is collecting one or more metrics from a single data source
+1. It runs as an independent process (`ps fax` shows it)
+2. It is started and stopped automatically by Netdata
+3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
+4. It supports any number of data collection **modules**
+5. It allows each **module** to have one or more data collection **jobs**
+6. Each **job** collects one or more metrics from a single data source
## Disclaimer
@@ -25,7 +16,7 @@ Module configurations are written in YAML and **pyYAML is required**.
Every configuration file must have one of two formats:
-- Configuration for only one job:
+- Configuration for only one job:
```yaml
update_every : 2 # update frequency
@@ -35,7 +26,7 @@ other_var1 : bla # variables passed to module
other_var2 : alb
```
-- Configuration for many jobs (ex. mysql):
+- Configuration for many jobs (e.g. mysql):
```yaml
# module defaults:
@@ -55,23 +46,19 @@ other_job:
## How to debug a python module
-```
+```bash
# become user netdata
sudo su -s /bin/bash netdata
```
Depending on where Netdata was installed, execute one of the following commands to trace the execution of a python module:
-```
+```bash
# execute the plugin in debug mode, for a specific module
/opt/netdata/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
/usr/libexec/netdata/plugins.d/python.d.plugin <module> debug trace
```
-Where `[module]` is the directory name under <https://github.com/netdata/netdata/tree/master/src/collectors/python.d.plugin>
+Where `<module>` is the directory name under <https://github.com/netdata/netdata/tree/master/src/collectors/python.d.plugin>
**Note**: If you would like to execute a collector in debug mode while it is still being run by Netdata, you can pass the `nolock` CLI option to the above commands.
-
-## How to write a new module
-
-See [develop a custom collector in Python](https://github.com/netdata/netdata/edit/master/docs/developer-and-contributor-corner/python-collector.md).
diff --git a/src/collectors/python.d.plugin/am2320/integrations/am2320.md b/src/collectors/python.d.plugin/am2320/integrations/am2320.md
index ea0e505c2..9aaa1153e 100644
--- a/src/collectors/python.d.plugin/am2320/integrations/am2320.md
+++ b/src/collectors/python.d.plugin/am2320/integrations/am2320.md
@@ -106,8 +106,8 @@ Install the Adafruit Circuit Python AM2320 library:
The configuration file name for this integration is `python.d/am2320.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/python.d.plugin/anomalies/README.md b/src/collectors/python.d.plugin/anomalies/README.md
deleted file mode 100644
index 1d7f8ba1b..000000000
--- a/src/collectors/python.d.plugin/anomalies/README.md
+++ /dev/null
@@ -1,248 +0,0 @@
-<!--
-title: "Anomaly detection with Netdata"
-description: "Use ML-driven anomaly detection to narrow your focus to only affected metrics and services/processes on your node to shorten root cause analysis."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/anomalies/README.md"
-sidebar_url: "Anomalies"
-sidebar_label: "anomalies"
-learn_status: "Published"
-learn_rel_path: "Integrations/Monitor/Anything"
--->
-
-# Anomaly detection with Netdata
-
-**Note**: Check out the [Netdata Anomaly Advisor](/docs/dashboards-and-charts/anomaly-advisor-tab.md) for a more native anomaly detection experience within Netdata.
-
-This collector uses the Python [PyOD](https://pyod.readthedocs.io/en/latest/index.html) library to perform unsupervised [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) on your Netdata charts and/or dimensions.
-
-Instead of this collector just _collecting_ data, it also does some computation on the data it collects to return an anomaly probability and anomaly flag for each chart or custom model you define. This computation consists of a **train** function that runs every `train_n_secs` to train the ML models to learn what 'normal' typically looks like on your node. At each iteration there is also a **predict** function that uses the latest trained models and most recent metrics to produce an anomaly probability and anomaly flag for each chart or custom model you define.
-
-> As this is a somewhat unique collector and involves often subjective concepts like anomalies and anomaly probabilities, we would love to hear any feedback on it from the community. Please let us know on the [community forum](https://community.netdata.cloud/t/anomalies-collector-feedback-megathread/767) or drop us a note at [analytics-ml-team@netdata.cloud](mailto:analytics-ml-team@netdata.cloud) for any and all feedback, both positive and negative. This sort of feedback is priceless to help us make complex features more useful.
-
-## Charts
-
-Two charts are produced:
-
-- **Anomaly Probability** (`anomalies.probability`): This chart shows the probability that the latest observed data is anomalous based on the trained model for that chart (using the [`predict_proba()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict_proba) method of the trained PyOD model).
-- **Anomaly** (`anomalies.anomaly`): This chart shows `1` or `0` predictions of if the latest observed data is considered anomalous or not based on the trained model (using the [`predict()`](https://pyod.readthedocs.io/en/latest/api_cc.html#pyod.models.base.BaseDetector.predict) method of the trained PyOD model).
-
-Below is an example of the charts produced by this collector and how they might look when things are 'normal' on the node. The anomaly probabilities tend to bounce randomly around a typically low probability range, one or two might randomly jump or drift outside of this range every now and then and show up as anomalies on the anomaly chart.
-
-![netdata-anomalies-collector-normal](https://user-images.githubusercontent.com/2178292/100663699-99755000-334e-11eb-922f-0c41a0176484.jpg)
-
-If we then go onto the system and run a command like `stress-ng --all 2` to create some [stress](https://wiki.ubuntu.com/Kernel/Reference/stress-ng), we see some charts begin to have anomaly probabilities that jump outside the typical range. When the anomaly probabilities change enough, we will start seeing anomalies being flagged on the `anomalies.anomaly` chart. The idea is that these charts are the most anomalous right now so could be a good place to start your troubleshooting.
-
-![netdata-anomalies-collector-abnormal](https://user-images.githubusercontent.com/2178292/100663710-9bd7aa00-334e-11eb-9d14-76fda73bc309.jpg)
-
-Then, as the issue passes, the anomaly probabilities should settle back down into their 'normal' range again.
-
-![netdata-anomalies-collector-normal-again](https://user-images.githubusercontent.com/2178292/100666681-481a9000-3351-11eb-9979-64728ee2dfb6.jpg)
-
-## Requirements
-
-- This collector will only work with Python 3 and requires the packages below be installed.
-- Typically you will not need to do this, but, if needed, to ensure Python 3 is used you can add the below line to the `[plugin:python.d]` section of `netdata.conf`
-
-```conf
-[plugin:python.d]
- # update every = 1
- command options = -ppython3
-```
-
-Install the required python libraries.
-
-```bash
-# become netdata user
-sudo su -s /bin/bash netdata
-# install required packages for the netdata user
-pip3 install --user netdata-pandas==0.0.38 numba==0.50.1 scikit-learn==0.23.2 pyod==0.8.3
-```
-
-## Configuration
-
-Install the Python requirements above, enable the collector and restart Netdata.
-
-```bash
-cd /etc/netdata/
-sudo ./edit-config python.d.conf
-# Set `anomalies: no` to `anomalies: yes`
-sudo systemctl restart netdata
-```
-
-The configuration for the anomalies collector defines how it will behave on your system and might take some experimentation with over time to set it optimally for your node. Out of the box, the config comes with some [sane defaults](https://www.netdata.cloud/blog/redefining-monitoring-with-netdata/) to get you started that try to balance the flexibility and power of the ML models with the goal of being as cheap as possible in term of cost on the node resources.
-
-_**Note**: If you are unsure about any of the below configuration options then it's best to just ignore all this and leave the `anomalies.conf` file alone to begin with. Then you can return to it later if you would like to tune things a bit more once the collector is running for a while and you have a feeling for its performance on your node._
-
-Edit the `python.d/anomalies.conf` configuration file using `edit-config` from the your agent's [config
-directory](/docs/netdata-agent/configuration/README.md), which is usually at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/anomalies.conf
-```
-
-The default configuration should look something like this. Here you can see each parameter (with sane defaults) and some information about each one and what it does.
-
-```conf
-# -
-# JOBS (data collection sources)
-
-# Pull data from local Netdata node.
-anomalies:
- name: 'Anomalies'
-
- # Host to pull data from.
- host: '127.0.0.1:19999'
-
- # Username and Password for Netdata if using basic auth.
- # username: '???'
- # password: '???'
-
- # Use http or https to pull data
- protocol: 'http'
-
- # SSL verify parameter for requests.get() calls
- tls_verify: true
-
- # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
- charts_regex: 'system\..*'
-
- # Charts to exclude, useful if you would like to exclude some specific charts.
- # Note: should be a ',' separated string like 'chart.name,chart.name'.
- charts_to_exclude: 'system.uptime,system.entropy'
-
- # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
- # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
- model: 'pca'
-
- # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
- train_max_n: 100000
-
- # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes).
- # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
- train_every_n: 1800
-
- # The length of the window of data to train on (14400 = last 4 hours).
- train_n_secs: 14400
-
- # How many prediction steps after a train event to just use previous prediction value for.
- # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
- train_no_prediction_n: 10
-
- # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
- # Start of training data for initial model.
- # initial_train_data_after: 1604578857
-
- # End of training data for initial model.
- # initial_train_data_before: 1604593257
-
- # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
- offset_n_secs: 0
-
- # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
- lags_n: 5
-
- # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
- smooth_n: 3
-
- # How many differences to take in preprocessing your data.
- # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
- # diffs_n=0 would mean training models on the raw values of each dimension.
- # diffs_n=1 means everything is done in terms of differences.
- diffs_n: 1
-
- # What is the typical proportion of anomalies in your data on average?
- # This parameter can control the sensitivity of your models to anomalies.
- # Some discussion here: https://github.com/yzhao062/pyod/issues/144
- contamination: 0.001
-
- # Set to true to include an "average_prob" dimension on anomalies probability chart which is
- # just the average of all anomaly probabilities at each time step
- include_average_prob: true
-
- # Define any custom models you would like to create anomaly probabilities for, some examples below to show how.
- # For example below example creates two custom models, one to run anomaly detection user and system cpu for our demo servers
- # and one on the cpu and mem apps metrics for the python.d.plugin.
- # custom_models:
- # - name: 'demos_cpu'
- # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
- # - name: 'apps_python_d_plugin'
- # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
-
- # Set to true to normalize, using min-max standardization, features used for the custom models.
- # Useful if your custom models contain dimensions on very different scales an model you use does
- # not internally do its own normalization. Usually best to leave as false.
- # custom_models_normalize: false
-```
-
-## Custom models
-
-In the `anomalies.conf` file you can also define some "custom models" which you can use to group one or more metrics into a single model much like is done by default for the charts you specify. This is useful if you have a handful of metrics that exist in different charts but perhaps are related to the same underlying thing you would like to perform anomaly detection on, for example a specific app or user.
-
-To define a custom model you would include configuration like below in `anomalies.conf`. By default there should already be some commented out examples in there.
-
-`name` is a name you give your custom model, this is what will appear alongside any other specified charts in the `anomalies.probability` and `anomalies.anomaly` charts. `dimensions` is a string of metrics you want to include in your custom model. By default the [netdata-pandas](https://github.com/netdata/netdata-pandas) library used to pull the data from Netdata uses a "chart.a|dim.1" type of naming convention in the pandas columns it returns, hence the `dimensions` string should look like "chart.name|dimension.name,chart.name|dimension.name". The examples below hopefully make this clear.
-
-```yaml
-custom_models:
- # a model for anomaly detection on the netdata user in terms of cpu, mem, threads, processes and sockets.
- - name: 'user_netdata'
- dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
- # a model for anomaly detection on the netdata python.d.plugin app in terms of cpu, mem, threads, processes and sockets.
- - name: 'apps_python_d_plugin'
- dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
-
-custom_models_normalize: false
-```
-
-## Troubleshooting
-
-To see any relevant log messages you can use a command like below.
-
-```bash
-`grep 'anomalies' /var/log/netdata/error.log`
-```
-
-If you would like to log in as `netdata` user and run the collector in debug mode to see more detail.
-
-```bash
-# become netdata user
-sudo su -s /bin/bash netdata
-# run collector in debug using `nolock` option if netdata is already running the collector itself.
-/usr/libexec/netdata/plugins.d/python.d.plugin anomalies debug trace nolock
-```
-
-## Deepdive tutorial
-
-If you would like to go deeper on what exactly the anomalies collector is doing under the hood then check out this [deepdive tutorial](https://github.com/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb) in our community repo where you can play around with some data from our demo servers (or your own if its accessible to you) and work through the calculations step by step.
-
-(Note: as its a Jupyter Notebook it might render a little prettier on [nbviewer](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb))
-
-## Notes
-
-- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](/src/web/api/README.md) to get the required data for each chart.
-- Python 3 is also required for the underlying ML libraries of [numba](https://pypi.org/project/numba/), [scikit-learn](https://pypi.org/project/scikit-learn/), and [PyOD](https://pypi.org/project/pyod/).
-- It may take a few hours or so (depending on your choice of `train_secs_n`) for the collector to 'settle' into it's typical behaviour in terms of the trained models and probabilities you will see in the normal running of your node.
-- As this collector does most of the work in Python itself, with [PyOD](https://pyod.readthedocs.io/en/latest/) leveraging [numba](https://numba.pydata.org/) under the hood, you may want to try it out first on a test or development system to get a sense of its performance characteristics on a node similar to where you would like to use it.
-- `lags_n`, `smooth_n`, and `diffs_n` together define the preprocessing done to the raw data before models are trained and before each prediction. This essentially creates a [feature vector](https://en.wikipedia.org/wiki/Feature_(machine_learning)#:~:text=In%20pattern%20recognition%20and%20machine,features%20that%20represent%20some%20object.&text=Feature%20vectors%20are%20often%20combined,score%20for%20making%20a%20prediction.) for each chart model (or each custom model). The default settings for these parameters aim to create a rolling matrix of recent smoothed [differenced](https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing) values for each chart. The aim of the model then is to score how unusual this 'matrix' of features is for each chart based on what it has learned as 'normal' from the training data. So as opposed to just looking at the single most recent value of a dimension and considering how strange it is, this approach looks at a recent smoothed window of all dimensions for a chart (or dimensions in a custom model) and asks how unusual the data as a whole looks. This should be more flexible in capturing a wider range of [anomaly types](https://andrewm4894.com/2020/10/19/different-types-of-time-series-anomalies/) and be somewhat more robust to temporary 'spikes' in the data that tend to always be happening somewhere in your metrics but often are not the most important type of anomaly (this is all covered in a lot more detail in the [deepdive tutorial](https://nbviewer.jupyter.org/github/netdata/community/blob/main/netdata-agent-api/netdata-pandas/anomalies_collector_deepdive.ipynb)).
-- You can see how long model training is taking by looking in the logs for the collector `grep 'anomalies' /var/log/netdata/error.log | grep 'training'` and you should see lines like `2020-12-01 22:02:14: python.d INFO: anomalies[local] : training complete in 2.81 seconds (runs_counter=2700, model=pca, train_n_secs=14400, models=26, n_fit_success=26, n_fit_fails=0, after=1606845731, before=1606860131).`.
- - This also gives counts of the number of models, if any, that failed to fit and so had to default back to the DefaultModel (which is currently [HBOS](https://pyod.readthedocs.io/en/latest/_modules/pyod/models/hbos.html)).
- - `after` and `before` here refer to the start and end of the training data used to train the models.
-- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) vm running Ubuntu 18.04 LTS and not doing any work some of the typical performance characteristics we saw from running this collector (with defaults) were:
- - A runtime (`netdata.runtime_anomalies`) of ~80ms when doing scoring and ~3 seconds when training or retraining the models.
- - Typically ~3%-3.5% additional cpu usage from scoring, jumping to ~60% for a couple of seconds during model training.
- - About ~150mb of ram (`apps.mem`) being continually used by the `python.d.plugin`.
-- If you activate this collector on a fresh node, it might take a little while to build up enough data to calculate a realistic and useful model.
-- Some models like `iforest` can be comparatively expensive (on same n1-standard-2 system above ~2s runtime during predict, ~40s training time, ~50% cpu on both train and predict) so if you would like to use it you might be advised to set a relatively high `update_every` maybe 10, 15 or 30 in `anomalies.conf`.
-- Setting a higher `train_every_n` and `update_every` is an easy way to devote less resources on the node to anomaly detection. Specifying less charts and a lower `train_n_secs` will also help reduce resources at the expense of covering less charts and maybe a more noisy model if you set `train_n_secs` to be too small for how your node tends to behave.
-- If you would like to enable this on a Raspberry Pi, then check out [this guide](/docs/developer-and-contributor-corner/raspberry-pi-anomaly-detection.md) which will guide you through first installing LLVM.
-
-## Useful links and further reading
-
-- [PyOD documentation](https://pyod.readthedocs.io/en/latest/), [PyOD Github](https://github.com/yzhao062/pyod).
-- [Anomaly Detection](https://en.wikipedia.org/wiki/Anomaly_detection) wikipedia page.
-- [Anomaly Detection YouTube playlist](https://www.youtube.com/playlist?list=PL6Zhl9mK2r0KxA6rB87oi4kWzoqGd5vp0) maintained by [andrewm4894](https://github.com/andrewm4894/) from Netdata.
-- [awesome-TS-anomaly-detection](https://github.com/rob-med/awesome-TS-anomaly-detection) Github list of useful tools, libraries and resources.
-- [Mendeley public group](https://www.mendeley.com/community/interesting-anomaly-detection-papers/) with some interesting anomaly detection papers we have been reading.
-- Good [blog post](https://www.anodot.com/blog/what-is-anomaly-detection/) from Anodot on time series anomaly detection. Anodot also have some great whitepapers in this space too that some may find useful.
-- Novelty and outlier detection in the [scikit-learn documentation](https://scikit-learn.org/stable/modules/outlier_detection.html).
-
diff --git a/src/collectors/python.d.plugin/anomalies/anomalies.chart.py b/src/collectors/python.d.plugin/anomalies/anomalies.chart.py
deleted file mode 100644
index 24e84cc15..000000000
--- a/src/collectors/python.d.plugin/anomalies/anomalies.chart.py
+++ /dev/null
@@ -1,425 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: anomalies netdata python.d module
-# Author: andrewm4894
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import sys
-import time
-from datetime import datetime
-import re
-import warnings
-
-import requests
-import numpy as np
-import pandas as pd
-from netdata_pandas.data import get_data, get_allmetrics_async
-from pyod.models.hbos import HBOS
-from pyod.models.pca import PCA
-from pyod.models.loda import LODA
-from pyod.models.iforest import IForest
-from pyod.models.cblof import CBLOF
-from pyod.models.feature_bagging import FeatureBagging
-from pyod.models.copod import COPOD
-from sklearn.preprocessing import MinMaxScaler
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# ignore some sklearn/numpy warnings that are ok
-warnings.filterwarnings('ignore', r'All-NaN slice encountered')
-warnings.filterwarnings('ignore', r'invalid value encountered in true_divide')
-warnings.filterwarnings('ignore', r'divide by zero encountered in true_divide')
-warnings.filterwarnings('ignore', r'invalid value encountered in subtract')
-
-disabled_by_default = True
-
-ORDER = ['probability', 'anomaly']
-
-CHARTS = {
- 'probability': {
- 'options': ['probability', 'Anomaly Probability', 'probability', 'anomalies', 'anomalies.probability', 'line'],
- 'lines': []
- },
- 'anomaly': {
- 'options': ['anomaly', 'Anomaly', 'count', 'anomalies', 'anomalies.anomaly', 'stacked'],
- 'lines': []
- },
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.basic_init()
- self.charts_init()
- self.custom_models_init()
- self.data_init()
- self.model_params_init()
- self.models_init()
- self.collected_dims = {'probability': set(), 'anomaly': set()}
-
- def check(self):
- if not (sys.version_info[0] >= 3 and sys.version_info[1] >= 6):
- self.error("anomalies collector only works with Python>=3.6")
- if len(self.host_charts_dict[self.host]) > 0:
- _ = get_allmetrics_async(host_charts_dict=self.host_charts_dict, protocol=self.protocol, user=self.username, pwd=self.password)
- return True
-
- def basic_init(self):
- """Perform some basic initialization.
- """
- self.order = ORDER
- self.definitions = CHARTS
- self.protocol = self.configuration.get('protocol', 'http')
- self.host = self.configuration.get('host', '127.0.0.1:19999')
- self.username = self.configuration.get('username', None)
- self.password = self.configuration.get('password', None)
- self.tls_verify = self.configuration.get('tls_verify', True)
- self.fitted_at = {}
- self.df_allmetrics = pd.DataFrame()
- self.last_train_at = 0
- self.include_average_prob = bool(self.configuration.get('include_average_prob', True))
- self.reinitialize_at_every_step = bool(self.configuration.get('reinitialize_at_every_step', False))
-
- def charts_init(self):
- """Do some initialisation of charts in scope related variables.
- """
- self.charts_regex = re.compile(self.configuration.get('charts_regex','None'))
- self.charts_available = [c for c in list(requests.get(f'{self.protocol}://{self.host}/api/v1/charts', verify=self.tls_verify).json().get('charts', {}).keys())]
- self.charts_in_scope = list(filter(self.charts_regex.match, self.charts_available))
- self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
- if len(self.charts_to_exclude) > 0:
- self.charts_in_scope = [c for c in self.charts_in_scope if c not in self.charts_to_exclude]
-
- def custom_models_init(self):
- """Perform initialization steps related to custom models.
- """
- self.custom_models = self.configuration.get('custom_models', None)
- self.custom_models_normalize = bool(self.configuration.get('custom_models_normalize', False))
- if self.custom_models:
- self.custom_models_names = [model['name'] for model in self.custom_models]
- self.custom_models_dims = [i for s in [model['dimensions'].split(',') for model in self.custom_models] for i in s]
- self.custom_models_dims = [dim if '::' in dim else f'{self.host}::{dim}' for dim in self.custom_models_dims]
- self.custom_models_charts = list(set([dim.split('|')[0].split('::')[1] for dim in self.custom_models_dims]))
- self.custom_models_hosts = list(set([dim.split('::')[0] for dim in self.custom_models_dims]))
- self.custom_models_host_charts_dict = {}
- for host in self.custom_models_hosts:
- self.custom_models_host_charts_dict[host] = list(set([dim.split('::')[1].split('|')[0] for dim in self.custom_models_dims if dim.startswith(host)]))
- self.custom_models_dims_renamed = [f"{model['name']}|{dim}" for model in self.custom_models for dim in model['dimensions'].split(',')]
- self.models_in_scope = list(set([f'{self.host}::{c}' for c in self.charts_in_scope] + self.custom_models_names))
- self.charts_in_scope = list(set(self.charts_in_scope + self.custom_models_charts))
- self.host_charts_dict = {self.host: self.charts_in_scope}
- for host in self.custom_models_host_charts_dict:
- if host not in self.host_charts_dict:
- self.host_charts_dict[host] = self.custom_models_host_charts_dict[host]
- else:
- for chart in self.custom_models_host_charts_dict[host]:
- if chart not in self.host_charts_dict[host]:
- self.host_charts_dict[host].extend(chart)
- else:
- self.models_in_scope = [f'{self.host}::{c}' for c in self.charts_in_scope]
- self.host_charts_dict = {self.host: self.charts_in_scope}
- self.model_display_names = {model: model.split('::')[1] if '::' in model else model for model in self.models_in_scope}
- #self.info(f'self.host_charts_dict (len={len(self.host_charts_dict[self.host])}): {self.host_charts_dict}')
-
- def data_init(self):
- """Initialize some empty data objects.
- """
- self.data_probability_latest = {f'{m}_prob': 0 for m in self.charts_in_scope}
- self.data_anomaly_latest = {f'{m}_anomaly': 0 for m in self.charts_in_scope}
- self.data_latest = {**self.data_probability_latest, **self.data_anomaly_latest}
-
- def model_params_init(self):
- """Model parameters initialisation.
- """
- self.train_max_n = self.configuration.get('train_max_n', 100000)
- self.train_n_secs = self.configuration.get('train_n_secs', 14400)
- self.offset_n_secs = self.configuration.get('offset_n_secs', 0)
- self.train_every_n = self.configuration.get('train_every_n', 1800)
- self.train_no_prediction_n = self.configuration.get('train_no_prediction_n', 10)
- self.initial_train_data_after = self.configuration.get('initial_train_data_after', 0)
- self.initial_train_data_before = self.configuration.get('initial_train_data_before', 0)
- self.contamination = self.configuration.get('contamination', 0.001)
- self.lags_n = {model: self.configuration.get('lags_n', 5) for model in self.models_in_scope}
- self.smooth_n = {model: self.configuration.get('smooth_n', 5) for model in self.models_in_scope}
- self.diffs_n = {model: self.configuration.get('diffs_n', 5) for model in self.models_in_scope}
-
- def models_init(self):
- """Models initialisation.
- """
- self.model = self.configuration.get('model', 'pca')
- if self.model == 'pca':
- self.models = {model: PCA(contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'loda':
- self.models = {model: LODA(contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'iforest':
- self.models = {model: IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'cblof':
- self.models = {model: CBLOF(n_clusters=3, contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'feature_bagging':
- self.models = {model: FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'copod':
- self.models = {model: COPOD(contamination=self.contamination) for model in self.models_in_scope}
- elif self.model == 'hbos':
- self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
- else:
- self.models = {model: HBOS(contamination=self.contamination) for model in self.models_in_scope}
- self.custom_model_scalers = {model: MinMaxScaler() for model in self.models_in_scope}
-
- def model_init(self, model):
- """Model initialisation of a single model.
- """
- if self.model == 'pca':
- self.models[model] = PCA(contamination=self.contamination)
- elif self.model == 'loda':
- self.models[model] = LODA(contamination=self.contamination)
- elif self.model == 'iforest':
- self.models[model] = IForest(n_estimators=50, bootstrap=True, behaviour='new', contamination=self.contamination)
- elif self.model == 'cblof':
- self.models[model] = CBLOF(n_clusters=3, contamination=self.contamination)
- elif self.model == 'feature_bagging':
- self.models[model] = FeatureBagging(base_estimator=PCA(contamination=self.contamination), contamination=self.contamination)
- elif self.model == 'copod':
- self.models[model] = COPOD(contamination=self.contamination)
- elif self.model == 'hbos':
- self.models[model] = HBOS(contamination=self.contamination)
- else:
- self.models[model] = HBOS(contamination=self.contamination)
- self.custom_model_scalers[model] = MinMaxScaler()
-
- def reinitialize(self):
- """Reinitialize charts, models and data to a beginning state.
- """
- self.charts_init()
- self.custom_models_init()
- self.data_init()
- self.model_params_init()
- self.models_init()
-
- def save_data_latest(self, data, data_probability, data_anomaly):
- """Save the most recent data objects to be used if needed in the future.
- """
- self.data_latest = data
- self.data_probability_latest = data_probability
- self.data_anomaly_latest = data_anomaly
-
- def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
- """If dimension not in chart then add it.
- """
- for dim in data:
- if dim not in self.collected_dims[chart]:
- self.collected_dims[chart].add(dim)
- self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
-
- for dim in list(self.collected_dims[chart]):
- if dim not in data:
- self.collected_dims[chart].remove(dim)
- self.charts[chart].del_dimension(dim, hide=False)
-
- def add_custom_models_dims(self, df):
- """Given a df, select columns used by custom models, add custom model name as prefix, and append to df.
-
- :param df <pd.DataFrame>: dataframe to append new renamed columns to.
- :return: <pd.DataFrame> dataframe with additional columns added relating to the specified custom models.
- """
- df_custom = df[self.custom_models_dims].copy()
- df_custom.columns = self.custom_models_dims_renamed
- df = df.join(df_custom)
-
- return df
-
- def make_features(self, arr, train=False, model=None):
- """Take in numpy array and preprocess accordingly by taking diffs, smoothing and adding lags.
-
- :param arr <np.ndarray>: numpy array we want to make features from.
- :param train <bool>: True if making features for training, in which case need to fit_transform scaler and maybe sample train_max_n.
- :param model <str>: model to make features for.
- :return: <np.ndarray> transformed numpy array.
- """
-
- def lag(arr, n):
- res = np.empty_like(arr)
- res[:n] = np.nan
- res[n:] = arr[:-n]
-
- return res
-
- arr = np.nan_to_num(arr)
-
- diffs_n = self.diffs_n[model]
- smooth_n = self.smooth_n[model]
- lags_n = self.lags_n[model]
-
- if self.custom_models_normalize and model in self.custom_models_names:
- if train:
- arr = self.custom_model_scalers[model].fit_transform(arr)
- else:
- arr = self.custom_model_scalers[model].transform(arr)
-
- if diffs_n > 0:
- arr = np.diff(arr, diffs_n, axis=0)
- arr = arr[~np.isnan(arr).any(axis=1)]
-
- if smooth_n > 1:
- arr = np.cumsum(arr, axis=0, dtype=float)
- arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
- arr = arr[smooth_n - 1:] / smooth_n
- arr = arr[~np.isnan(arr).any(axis=1)]
-
- if lags_n > 0:
- arr_orig = np.copy(arr)
- for lag_n in range(1, lags_n + 1):
- arr = np.concatenate((arr, lag(arr_orig, lag_n)), axis=1)
- arr = arr[~np.isnan(arr).any(axis=1)]
-
- if train:
- if len(arr) > self.train_max_n:
- arr = arr[np.random.randint(arr.shape[0], size=self.train_max_n), :]
-
- arr = np.nan_to_num(arr)
-
- return arr
-
- def train(self, models_to_train=None, train_data_after=0, train_data_before=0):
- """Pull required training data and train a model for each specified model.
-
- :param models_to_train <list>: list of models to train on.
- :param train_data_after <int>: integer timestamp for start of train data.
- :param train_data_before <int>: integer timestamp for end of train data.
- """
- now = datetime.now().timestamp()
- if train_data_after > 0 and train_data_before > 0:
- before = train_data_before
- after = train_data_after
- else:
- before = int(now) - self.offset_n_secs
- after = before - self.train_n_secs
-
- # get training data
- df_train = get_data(
- host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', after=after, before=before,
- sort_cols=True, numeric_only=True, protocol=self.protocol, float_size='float32', user=self.username, pwd=self.password,
- verify=self.tls_verify
- ).ffill()
- if self.custom_models:
- df_train = self.add_custom_models_dims(df_train)
-
- # train model
- self.try_fit(df_train, models_to_train=models_to_train)
- self.info(f'training complete in {round(time.time() - now, 2)} seconds (runs_counter={self.runs_counter}, model={self.model}, train_n_secs={self.train_n_secs}, models={len(self.fitted_at)}, n_fit_success={self.n_fit_success}, n_fit_fails={self.n_fit_fail}, after={after}, before={before}).')
- self.last_train_at = self.runs_counter
-
- def try_fit(self, df_train, models_to_train=None):
- """Try fit each model and try to fallback to a default model if fit fails for any reason.
-
- :param df_train <pd.DataFrame>: data to train on.
- :param models_to_train <list>: list of models to train.
- """
- if models_to_train is None:
- models_to_train = list(self.models.keys())
- self.n_fit_fail, self.n_fit_success = 0, 0
- for model in models_to_train:
- if model not in self.models:
- self.model_init(model)
- X_train = self.make_features(
- df_train[df_train.columns[df_train.columns.str.startswith(f'{model}|')]].values,
- train=True, model=model)
- try:
- self.models[model].fit(X_train)
- self.n_fit_success += 1
- except Exception as e:
- self.n_fit_fail += 1
- self.info(e)
- self.info(f'training failed for {model} at run_counter {self.runs_counter}, defaulting to hbos model.')
- self.models[model] = HBOS(contamination=self.contamination)
- self.models[model].fit(X_train)
- self.fitted_at[model] = self.runs_counter
-
- def predict(self):
- """Get latest data, make it into a feature vector, and get predictions for each available model.
-
- :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
- """
- # get recent data to predict on
- df_allmetrics = get_allmetrics_async(
- host_charts_dict=self.host_charts_dict, host_prefix=True, host_sep='::', wide=True, sort_cols=True,
- protocol=self.protocol, numeric_only=True, float_size='float32', user=self.username, pwd=self.password
- )
- if self.custom_models:
- df_allmetrics = self.add_custom_models_dims(df_allmetrics)
- self.df_allmetrics = self.df_allmetrics.append(df_allmetrics).ffill().tail((max(self.lags_n.values()) + max(self.smooth_n.values()) + max(self.diffs_n.values())) * 2)
-
- # get predictions
- data_probability, data_anomaly = self.try_predict()
-
- return data_probability, data_anomaly
-
- def try_predict(self):
- """Try make prediction and fall back to last known prediction if fails.
-
- :return: (<dict>,<dict>) tuple of dictionaries, one for probability scores and the other for anomaly predictions.
- """
- data_probability, data_anomaly = {}, {}
- for model in self.fitted_at.keys():
- model_display_name = self.model_display_names[model]
- try:
- X_model = np.nan_to_num(
- self.make_features(
- self.df_allmetrics[self.df_allmetrics.columns[self.df_allmetrics.columns.str.startswith(f'{model}|')]].values,
- model=model
- )[-1,:].reshape(1, -1)
- )
- data_probability[model_display_name + '_prob'] = np.nan_to_num(self.models[model].predict_proba(X_model)[-1][1]) * 10000
- data_anomaly[model_display_name + '_anomaly'] = self.models[model].predict(X_model)[-1]
- except Exception as _:
- #self.info(e)
- if model_display_name + '_prob' in self.data_latest:
- #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, using last prediction instead.')
- data_probability[model_display_name + '_prob'] = self.data_latest[model_display_name + '_prob']
- data_anomaly[model_display_name + '_anomaly'] = self.data_latest[model_display_name + '_anomaly']
- else:
- #self.info(f'prediction failed for {model} at run_counter {self.runs_counter}, skipping as no previous prediction.')
- continue
-
- return data_probability, data_anomaly
-
- def get_data(self):
-
- # initialize to what's available right now
- if self.reinitialize_at_every_step or len(self.host_charts_dict[self.host]) == 0:
- self.charts_init()
- self.custom_models_init()
- self.model_params_init()
-
- # if not all models have been trained then train those we need to
- if len(self.fitted_at) < len(self.models_in_scope):
- self.train(
- models_to_train=[m for m in self.models_in_scope if m not in self.fitted_at],
- train_data_after=self.initial_train_data_after,
- train_data_before=self.initial_train_data_before
- )
- # retrain all models as per schedule from config
- elif self.train_every_n > 0 and self.runs_counter % self.train_every_n == 0:
- self.reinitialize()
- self.train()
-
- # roll forward previous predictions around a training step to avoid the possibility of having the training itself trigger an anomaly
- if (self.runs_counter - self.last_train_at) <= self.train_no_prediction_n:
- data_probability = self.data_probability_latest
- data_anomaly = self.data_anomaly_latest
- else:
- data_probability, data_anomaly = self.predict()
- if self.include_average_prob:
- average_prob = np.mean(list(data_probability.values()))
- data_probability['average_prob'] = 0 if np.isnan(average_prob) else average_prob
-
- data = {**data_probability, **data_anomaly}
-
- self.validate_charts('probability', data_probability, divisor=100)
- self.validate_charts('anomaly', data_anomaly)
-
- self.save_data_latest(data, data_probability, data_anomaly)
-
- #self.info(f'len(data)={len(data)}')
- #self.info(f'data')
-
- return data
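The preprocessing at the heart of `make_features()` above is a plain numpy pipeline: difference the raw values, apply a rolling-mean smoothing via cumulative sums, then append lagged copies of every column and drop the rows made incomplete by lagging. Below is a minimal sketch of that pipeline under illustrative settings (`diffs_n=1`, `smooth_n=3`, `lags_n=2`) on a random toy array, not the module's real data:

```python
# Sketch of the make_features() pipeline above, on a toy array.
# The diffs_n, smooth_n, lags_n values here are illustrative, not defaults.
import numpy as np

def lag(arr, n):
    # shift rows down by n, padding the first n rows with NaN (same helper as in make_features)
    res = np.empty_like(arr)
    res[:n] = np.nan
    res[n:] = arr[:-n]
    return res

arr = np.random.random((100, 2))   # 100 observations of 2 dimensions
diffs_n, smooth_n, lags_n = 1, 3, 2

# 1. differencing
arr = np.diff(arr, diffs_n, axis=0)

# 2. rolling mean of window smooth_n via cumulative sums
arr = np.cumsum(arr, axis=0, dtype=float)
arr[smooth_n:] = arr[smooth_n:] - arr[:-smooth_n]
arr = arr[smooth_n - 1:] / smooth_n

# 3. append lagged copies of each column, then drop rows that picked up NaNs
arr_orig = np.copy(arr)
for lag_n in range(1, lags_n + 1):
    arr = np.concatenate((arr, lag(arr_orig, lag_n)), axis=1)
arr = arr[~np.isnan(arr).any(axis=1)]

print(arr.shape)  # (95, 6): 99 diffed rows - 2 lost to smoothing - 2 lost to lagging; 2 * (1 + lags_n) columns
```

The same shape bookkeeping is why `predict()` keeps only the tail of `df_allmetrics`: it needs just enough recent rows to survive differencing, smoothing and lagging.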
diff --git a/src/collectors/python.d.plugin/anomalies/anomalies.conf b/src/collectors/python.d.plugin/anomalies/anomalies.conf
deleted file mode 100644
index ef867709a..000000000
--- a/src/collectors/python.d.plugin/anomalies/anomalies.conf
+++ /dev/null
@@ -1,184 +0,0 @@
-# netdata python.d.plugin configuration for anomalies
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 2
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-
-# Pull data from local Netdata node.
-anomalies:
- name: 'Anomalies'
-
- # Host to pull data from.
- host: '127.0.0.1:19999'
-
- # Username and Password for Netdata if using basic auth.
- # username: '???'
- # password: '???'
-
- # Use http or https to pull data
- protocol: 'http'
-
- # SSL verify parameter for requests.get() calls
- tls_verify: true
-
- # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
- charts_regex: 'system\..*'
-
- # Charts to exclude, useful if you would like to exclude some specific charts.
- # Note: should be a ',' separated string like 'chart.name,chart.name'.
- charts_to_exclude: 'system.uptime,system.entropy'
-
- # What model to use - can be one of 'pca', 'hbos', 'iforest', 'cblof', 'loda', 'copod' or 'feature_bagging'.
- # More details here: https://pyod.readthedocs.io/en/latest/pyod.models.html.
- model: 'pca'
-
- # Max number of observations to train on, to help cap compute cost of training model if you set a very large train_n_secs.
- train_max_n: 100000
-
- # How often to re-train the model (assuming update_every=1 then train_every_n=1800 represents (re)training every 30 minutes).
- # Note: If you want to turn off re-training set train_every_n=0 and after initial training the models will not be retrained.
- train_every_n: 1800
-
- # The length of the window of data to train on (14400 = last 4 hours).
- train_n_secs: 14400
-
- # How many prediction steps after a train event to just use previous prediction value for.
- # Used to reduce possibility of the training step itself appearing as an anomaly on the charts.
- train_no_prediction_n: 10
-
- # If you would like to train the model for the first time on a specific window then you can define it using the below two variables.
- # Start of training data for initial model.
- # initial_train_data_after: 1604578857
-
- # End of training data for initial model.
- # initial_train_data_before: 1604593257
-
- # If you would like to ignore recent data in training then you can offset it by offset_n_secs.
- offset_n_secs: 0
-
- # How many lagged values of each dimension to include in the 'feature vector' each model is trained on.
- lags_n: 5
-
- # How much smoothing to apply to each dimension in the 'feature vector' each model is trained on.
- smooth_n: 3
-
- # How many differences to take in preprocessing your data.
- # More info on differencing here: https://en.wikipedia.org/wiki/Autoregressive_integrated_moving_average#Differencing
- # diffs_n=0 would mean training models on the raw values of each dimension.
- # diffs_n=1 means everything is done in terms of differences.
- diffs_n: 1
-
- # What is the typical proportion of anomalies in your data on average?
- # This parameter can control the sensitivity of your models to anomalies.
- # Some discussion here: https://github.com/yzhao062/pyod/issues/144
- contamination: 0.001
-
- # Set to true to include an "average_prob" dimension on the anomalies probability chart, which is
- # just the average of all anomaly probabilities at each time step.
- include_average_prob: true
-
- # Define any custom models you would like to create anomaly probabilities for, some examples below to show how.
- # For example, the configuration below creates two custom models: one that runs anomaly detection on user and system cpu for our demo servers,
- # and one on the cpu and mem apps metrics for the python.d.plugin.
- # custom_models:
- # - name: 'demos_cpu'
- # dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
- # - name: 'apps_python_d_plugin'
- # dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin'
-
- # Set to true to normalize, using min-max standardization, features used for the custom models.
- # Useful if your custom models contain dimensions on very different scales and the model you use does
- # not internally do its own normalization. Usually best to leave as false.
- # custom_models_normalize: false
-
-# Standalone Custom models example as an additional collector job.
-# custom:
-# name: 'custom'
-# host: '127.0.0.1:19999'
-# protocol: 'http'
-# charts_regex: 'None'
-# charts_to_exclude: 'None'
-# model: 'pca'
-# train_max_n: 100000
-# train_every_n: 1800
-# train_n_secs: 14400
-# offset_n_secs: 0
-# lags_n: 5
-# smooth_n: 3
-# diffs_n: 1
-# contamination: 0.001
-# custom_models:
-# - name: 'user_netdata'
-# dimensions: 'users.cpu|netdata,users.mem|netdata,users.threads|netdata,users.processes|netdata,users.sockets|netdata'
-# - name: 'apps_python_d_plugin'
-# dimensions: 'apps.cpu|python.d.plugin,apps.mem|python.d.plugin,apps.threads|python.d.plugin,apps.processes|python.d.plugin,apps.sockets|python.d.plugin'
-
-# Pull data from some demo nodes for cross node custom models.
-# demos:
-# name: 'demos'
-# host: '127.0.0.1:19999'
-# protocol: 'http'
-# charts_regex: 'None'
-# charts_to_exclude: 'None'
-# model: 'pca'
-# train_max_n: 100000
-# train_every_n: 1800
-# train_n_secs: 14400
-# offset_n_secs: 0
-# lags_n: 5
-# smooth_n: 3
-# diffs_n: 1
-# contamination: 0.001
-# custom_models:
-# - name: 'system.cpu'
-# dimensions: 'london.my-netdata.io::system.cpu|user,london.my-netdata.io::system.cpu|system,newyork.my-netdata.io::system.cpu|user,newyork.my-netdata.io::system.cpu|system'
-# - name: 'system.ip'
-# dimensions: 'london.my-netdata.io::system.ip|received,london.my-netdata.io::system.ip|sent,newyork.my-netdata.io::system.ip|received,newyork.my-netdata.io::system.ip|sent'
-# - name: 'system.net'
-# dimensions: 'london.my-netdata.io::system.net|received,london.my-netdata.io::system.net|sent,newyork.my-netdata.io::system.net|received,newyork.my-netdata.io::system.net|sent'
-# - name: 'system.io'
-# dimensions: 'london.my-netdata.io::system.io|in,london.my-netdata.io::system.io|out,newyork.my-netdata.io::system.io|in,newyork.my-netdata.io::system.io|out'
-
-# Example additional job if you want to also pull data from a child streaming to your
-# local parent or even a remote node so long as the Netdata REST API is accessible.
-# mychildnode1:
-# name: 'mychildnode1'
-# host: '127.0.0.1:19999/host/mychildnode1'
-# protocol: 'http'
-# charts_regex: 'system\..*'
-# charts_to_exclude: 'None'
-# model: 'pca'
-# train_max_n: 100000
-# train_every_n: 1800
-# train_n_secs: 14400
-# offset_n_secs: 0
-# lags_n: 5
-# smooth_n: 3
-# diffs_n: 1
-# contamination: 0.001
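For reference, the `custom_models` option above maps onto the `add_custom_models_dims()` step of the removed module: the configured dimension columns are copied, renamed with the custom model's name as a prefix, and joined back onto the wide dataframe. A small pandas sketch follows; the `<model>|<original column>` renaming format is an assumption here, chosen to match how `try_fit()` selects a model's columns by the `<model>|` prefix:

```python
# Sketch of what add_custom_models_dims() does for a custom model named 'demos_cpu'.
# The renamed-column format '<model>|<original column>' is an assumption (see note above).
import numpy as np
import pandas as pd

custom_models_dims = [
    'london.my-netdata.io::system.cpu|user',
    'london.my-netdata.io::system.cpu|system',
]
custom_models_dims_renamed = [f'demos_cpu|{dim}' for dim in custom_models_dims]

# wide dataframe of collected metrics (toy values)
df = pd.DataFrame(np.random.random((3, 2)), columns=custom_models_dims)

df_custom = df[custom_models_dims].copy()
df_custom.columns = custom_models_dims_renamed
df = df.join(df_custom)  # original columns plus the prefixed copies

print([c for c in df.columns if c.startswith('demos_cpu|')])
```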
diff --git a/src/collectors/python.d.plugin/anomalies/metadata.yaml b/src/collectors/python.d.plugin/anomalies/metadata.yaml
deleted file mode 100644
index c14e47bf4..000000000
--- a/src/collectors/python.d.plugin/anomalies/metadata.yaml
+++ /dev/null
@@ -1,87 +0,0 @@
-# NOTE: this file is commented out as users are recommended to use the
-# native anomaly detection capabilities on the agent instead.
-# meta:
-# plugin_name: python.d.plugin
-# module_name: anomalies
-# monitored_instance:
-# name: python.d anomalies
-# link: ""
-# categories: []
-# icon_filename: ""
-# related_resources:
-# integrations:
-# list: []
-# info_provided_to_referring_integrations:
-# description: ""
-# keywords: []
-# most_popular: false
-# overview:
-# data_collection:
-# metrics_description: ""
-# method_description: ""
-# supported_platforms:
-# include: []
-# exclude: []
-# multi_instance: true
-# additional_permissions:
-# description: ""
-# default_behavior:
-# auto_detection:
-# description: ""
-# limits:
-# description: ""
-# performance_impact:
-# description: ""
-# setup:
-# prerequisites:
-# list: []
-# configuration:
-# file:
-# name: ""
-# description: ""
-# options:
-# description: ""
-# folding:
-# title: ""
-# enabled: true
-# list: []
-# examples:
-# folding:
-# enabled: true
-# title: ""
-# list: []
-# troubleshooting:
-# problems:
-# list: []
-# alerts:
-# - name: anomalies_anomaly_probabilities
-# link: https://github.com/netdata/netdata/blob/master/src/health/health.d/anomalies.conf
-# metric: anomalies.probability
-# info: average anomaly probability over the last 2 minutes
-# - name: anomalies_anomaly_flags
-# link: https://github.com/netdata/netdata/blob/master/src/health/health.d/anomalies.conf
-# metric: anomalies.anomaly
-# info: number of anomalies in the last 2 minutes
-# metrics:
-# folding:
-# title: Metrics
-# enabled: false
-# description: ""
-# availability: []
-# scopes:
-# - name: global
-# description: ""
-# labels: []
-# metrics:
-# - name: anomalies.probability
-# description: Anomaly Probability
-# unit: "probability"
-# chart_type: line
-# dimensions:
-# - name: a dimension per probability
-# - name: anomalies.anomaly
-# description: Anomaly
-# unit: "count"
-# chart_type: stacked
-# dimensions:
-# - name: a dimension per anomaly
diff --git a/src/collectors/python.d.plugin/boinc/README.md b/src/collectors/python.d.plugin/boinc/README.md
deleted file mode 120000
index 22c10ca17..000000000
--- a/src/collectors/python.d.plugin/boinc/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/boinc.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/boinc/boinc.chart.py b/src/collectors/python.d.plugin/boinc/boinc.chart.py
deleted file mode 100644
index a31eda1c2..000000000
--- a/src/collectors/python.d.plugin/boinc/boinc.chart.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: BOINC netdata python.d module
-# Author: Austin S. Hemmelgarn (Ferroin)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import socket
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from third_party import boinc_client
-
-ORDER = [
- 'tasks',
- 'states',
- 'sched_states',
- 'process_states',
-]
-
-CHARTS = {
- 'tasks': {
- 'options': [None, 'Overall Tasks', 'tasks', 'boinc', 'boinc.tasks', 'line'],
- 'lines': [
- ['total', 'Total', 'absolute', 1, 1],
- ['active', 'Active', 'absolute', 1, 1]
- ]
- },
- 'states': {
- 'options': [None, 'Tasks per State', 'tasks', 'boinc', 'boinc.states', 'line'],
- 'lines': [
- ['new', 'New', 'absolute', 1, 1],
- ['downloading', 'Downloading', 'absolute', 1, 1],
- ['downloaded', 'Ready to Run', 'absolute', 1, 1],
- ['comperror', 'Compute Errors', 'absolute', 1, 1],
- ['uploading', 'Uploading', 'absolute', 1, 1],
- ['uploaded', 'Uploaded', 'absolute', 1, 1],
- ['aborted', 'Aborted', 'absolute', 1, 1],
- ['upload_failed', 'Failed Uploads', 'absolute', 1, 1]
- ]
- },
- 'sched_states': {
- 'options': [None, 'Tasks per Scheduler State', 'tasks', 'boinc', 'boinc.sched', 'line'],
- 'lines': [
- ['uninit_sched', 'Uninitialized', 'absolute', 1, 1],
- ['preempted', 'Preempted', 'absolute', 1, 1],
- ['scheduled', 'Scheduled', 'absolute', 1, 1]
- ]
- },
- 'process_states': {
- 'options': [None, 'Tasks per Process State', 'tasks', 'boinc', 'boinc.process', 'line'],
- 'lines': [
- ['uninit_proc', 'Uninitialized', 'absolute', 1, 1],
- ['executing', 'Executing', 'absolute', 1, 1],
- ['suspended', 'Suspended', 'absolute', 1, 1],
- ['aborting', 'Aborted', 'absolute', 1, 1],
- ['quit', 'Quit', 'absolute', 1, 1],
- ['copy_pending', 'Copy Pending', 'absolute', 1, 1]
- ]
- }
-}
-
-# A simple template used for pre-loading the return dictionary to make
-# the _get_data() method simpler.
-_DATA_TEMPLATE = {
- 'total': 0,
- 'active': 0,
- 'new': 0,
- 'downloading': 0,
- 'downloaded': 0,
- 'comperror': 0,
- 'uploading': 0,
- 'uploaded': 0,
- 'aborted': 0,
- 'upload_failed': 0,
- 'uninit_sched': 0,
- 'preempted': 0,
- 'scheduled': 0,
- 'uninit_proc': 0,
- 'executing': 0,
- 'suspended': 0,
- 'aborting': 0,
- 'quit': 0,
- 'copy_pending': 0
-}
-
-# Map task states to dimensions
-_TASK_MAP = {
- boinc_client.ResultState.NEW: 'new',
- boinc_client.ResultState.FILES_DOWNLOADING: 'downloading',
- boinc_client.ResultState.FILES_DOWNLOADED: 'downloaded',
- boinc_client.ResultState.COMPUTE_ERROR: 'comperror',
- boinc_client.ResultState.FILES_UPLOADING: 'uploading',
- boinc_client.ResultState.FILES_UPLOADED: 'uploaded',
- boinc_client.ResultState.ABORTED: 'aborted',
- boinc_client.ResultState.UPLOAD_FAILED: 'upload_failed'
-}
-
-# Map scheduler states to dimensions
-_SCHED_MAP = {
- boinc_client.CpuSched.UNINITIALIZED: 'uninit_sched',
- boinc_client.CpuSched.PREEMPTED: 'preempted',
- boinc_client.CpuSched.SCHEDULED: 'scheduled',
-}
-
-# Maps process states to dimensions
-_PROC_MAP = {
- boinc_client.Process.UNINITIALIZED: 'uninit_proc',
- boinc_client.Process.EXECUTING: 'executing',
- boinc_client.Process.SUSPENDED: 'suspended',
- boinc_client.Process.ABORT_PENDING: 'aborting',
- boinc_client.Process.QUIT_PENDING: 'quit',
- boinc_client.Process.COPY_PENDING: 'copy_pending'
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 0)
- self.password = self.configuration.get('password', '')
- self.client = boinc_client.BoincClient(host=self.host, port=self.port, passwd=self.password)
- self.alive = False
-
- def check(self):
- return self.connect()
-
- def connect(self):
- self.client.connect()
- self.alive = self.client.connected and self.client.authorized
- return self.alive
-
- def reconnect(self):
- # The client class itself actually disconnects existing
- # connections when it is told to connect, so we don't need to
- # explicitly disconnect when we're just trying to reconnect.
- return self.connect()
-
- def is_alive(self):
- if not self.alive:
- return self.reconnect()
- return True
-
- def _get_data(self):
- if not self.is_alive():
- return None
-
- data = dict(_DATA_TEMPLATE)
-
- try:
- results = self.client.get_tasks()
- except socket.error:
- self.error('Connection is dead')
- self.alive = False
- return None
-
- for task in results:
- data['total'] += 1
- data[_TASK_MAP[task.state]] += 1
- try:
- if task.active_task:
- data['active'] += 1
- data[_SCHED_MAP[task.scheduler_state]] += 1
- data[_PROC_MAP[task.active_task_state]] += 1
- except AttributeError:
- pass
-
- return data or None
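A standalone sketch of the collection pattern above, using only the `boinc_client` calls that `Service` itself exercises (`BoincClient`, `connect`, `connected`, `authorized`, `get_tasks`). Host, port and password are placeholders, 31416 is the default GUI RPC port noted in `boinc.conf` below, and the snippet only runs where the bundled `third_party.boinc_client` module is importable:

```python
from collections import Counter

from third_party import boinc_client

client = boinc_client.BoincClient(host='localhost', port=31416, passwd='secret')
client.connect()

if client.connected and client.authorized:
    tasks = client.get_tasks()
    # tally tasks per result state, the same grouping _TASK_MAP feeds into the 'states' chart
    per_state = Counter(task.state for task in tasks)
    print(len(tasks), per_state.get(boinc_client.ResultState.FILES_DOWNLOADED, 0))
```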
diff --git a/src/collectors/python.d.plugin/boinc/boinc.conf b/src/collectors/python.d.plugin/boinc/boinc.conf
deleted file mode 100644
index 16edf55c4..000000000
--- a/src/collectors/python.d.plugin/boinc/boinc.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# netdata python.d.plugin configuration for boinc
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, boinc also supports the following:
-#
-# hostname: localhost # The host running the BOINC client
-# port: 31416 # The remote GUI RPC port for BOINC
-# password: '' # The remote GUI RPC password
diff --git a/src/collectors/python.d.plugin/boinc/integrations/boinc.md b/src/collectors/python.d.plugin/boinc/integrations/boinc.md
deleted file mode 100644
index d5fcac215..000000000
--- a/src/collectors/python.d.plugin/boinc/integrations/boinc.md
+++ /dev/null
@@ -1,238 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/boinc/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/boinc/metadata.yaml"
-sidebar_label: "BOINC"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Distributed Computing Systems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# BOINC
-
-
-<img src="https://netdata.cloud/img/bolt.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: boinc
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors task counts for the Berkeley Open Infrastructure for Network Computing (BOINC) distributed computing client.
-
-It uses the same RPC interface that the BOINC monitoring GUI does.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for the `gui_rpc_auth.cfg` file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per BOINC instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| boinc.tasks | Total, Active | tasks |
-| boinc.states | New, Downloading, Ready to Run, Compute Errors, Uploading, Uploaded, Aborted, Failed Uploads | tasks |
-| boinc.sched | Uninitialized, Preempted, Scheduled | tasks |
-| boinc.process | Uninitialized, Executing, Suspended, Aborted, Quit, Copy Pending | tasks |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ boinc_total_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of total tasks over the last 10 minutes |
-| [ boinc_active_tasks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.tasks | average number of active tasks over the last 10 minutes |
-| [ boinc_compute_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of compute errors over the last 10 minutes |
-| [ boinc_upload_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf) | boinc.states | average number of failed uploads over the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-#### Boinc RPC interface
-
-BOINC requires use of a password to access its RPC interface. You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/boinc.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/boinc.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| hostname | Define a hostname where boinc is running. | localhost | no |
-| port | The port of boinc RPC interface. | | no |
-| password | Provide a password to connect to a boinc RPC interface. | | no |
-
-</details>
-
-#### Examples
-
-##### Configuration of a remote boinc instance
-
-A basic JOB configuration for a remote boinc instance
-
-```yaml
-remote:
- hostname: '1.2.3.4'
- port: 1234
- password: 'some-password'
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 1234
- password: 'some-password'
-
-remote_job:
- name: 'remote'
- host: '192.0.2.1'
- port: 1234
- password: some-other-password
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin boinc debug trace
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `boinc` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep boinc
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
-
-```bash
-grep boinc /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep boinc
-```
-
-
diff --git a/src/collectors/python.d.plugin/boinc/metadata.yaml b/src/collectors/python.d.plugin/boinc/metadata.yaml
deleted file mode 100644
index 9448cbe0f..000000000
--- a/src/collectors/python.d.plugin/boinc/metadata.yaml
+++ /dev/null
@@ -1,198 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: boinc
- monitored_instance:
- name: BOINC
- link: "https://boinc.berkeley.edu/"
- categories:
- - data-collection.distributed-computing-systems
- icon_filename: "bolt.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - boinc
- - distributed
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors task counts for the Berkeley Open Infrastructure Networking Computing (BOINC) distributed computing client."
- method_description: "It uses the same RPC interface that the BOINC monitoring GUI does."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "By default, the module will try to auto-detect the password to the RPC interface by looking in `/var/lib/boinc` for this file (this is the location most Linux distributions use for a system-wide BOINC installation), so things may just work without needing configuration for a local system."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Boinc RPC interface"
- description: BOINC requires use of a password to access its RPC interface. You can find this password in the `gui_rpc_auth.cfg` file in your BOINC directory.
- configuration:
- file:
- name: python.d/boinc.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: hostname
- description: Define a hostname where boinc is running.
- default_value: "localhost"
- required: false
- - name: port
- description: The port of boinc RPC interface.
- default_value: ""
- required: false
- - name: password
- description: Provide a password to connect to a boinc RPC interface.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Configuration of a remote boinc instance
- description: A basic JOB configuration for a remote boinc instance
- folding:
- enabled: false
- config: |
- remote:
- hostname: '1.2.3.4'
- port: 1234
- password: 'some-password'
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 1234
- password: 'some-password'
-
- remote_job:
- name: 'remote'
- host: '192.0.2.1'
- port: 1234
- password: some-other-password
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: boinc_total_tasks
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf
- metric: boinc.tasks
- info: average number of total tasks over the last 10 minutes
- os: "*"
- - name: boinc_active_tasks
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf
- metric: boinc.tasks
- info: average number of active tasks over the last 10 minutes
- os: "*"
- - name: boinc_compute_errors
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf
- metric: boinc.states
- info: average number of compute errors over the last 10 minutes
- os: "*"
- - name: boinc_upload_errors
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/boinc.conf
- metric: boinc.states
- info: average number of failed uploads over the last 10 minutes
- os: "*"
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: boinc.tasks
- description: Overall Tasks
- unit: "tasks"
- chart_type: line
- dimensions:
- - name: Total
- - name: Active
- - name: boinc.states
- description: Tasks per State
- unit: "tasks"
- chart_type: line
- dimensions:
- - name: New
- - name: Downloading
- - name: Ready to Run
- - name: Compute Errors
- - name: Uploading
- - name: Uploaded
- - name: Aborted
- - name: Failed Uploads
- - name: boinc.sched
- description: Tasks per Scheduler State
- unit: "tasks"
- chart_type: line
- dimensions:
- - name: Uninitialized
- - name: Preempted
- - name: Scheduled
- - name: boinc.process
- description: Tasks per Process State
- unit: "tasks"
- chart_type: line
- dimensions:
- - name: Uninitialized
- - name: Executing
- - name: Suspended
- - name: Aborted
- - name: Quit
- - name: Copy Pending
diff --git a/src/collectors/python.d.plugin/ceph/README.md b/src/collectors/python.d.plugin/ceph/README.md
deleted file mode 120000
index 654248b70..000000000
--- a/src/collectors/python.d.plugin/ceph/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/ceph.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/ceph/ceph.chart.py b/src/collectors/python.d.plugin/ceph/ceph.chart.py
deleted file mode 100644
index 4bcbe1979..000000000
--- a/src/collectors/python.d.plugin/ceph/ceph.chart.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: ceph netdata python.d module
-# Author: Luis Eduardo (lets00)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-try:
- import rados
-
- CEPH = True
-except ImportError:
- CEPH = False
-
-import json
-import os
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-update_every = 10
-
-ORDER = [
- 'general_usage',
- 'general_objects',
- 'general_bytes',
- 'general_operations',
- 'general_latency',
- 'pool_usage',
- 'pool_objects',
- 'pool_read_bytes',
- 'pool_write_bytes',
- 'pool_read_operations',
- 'pool_write_operations',
- 'osd_usage',
- 'osd_size',
- 'osd_apply_latency',
- 'osd_commit_latency'
-]
-
-CHARTS = {
- 'general_usage': {
- 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
- 'lines': [
- ['general_available', 'avail', 'absolute'],
- ['general_usage', 'used', 'absolute']
- ]
- },
- 'general_objects': {
- 'options': [None, 'Ceph General Objects', 'objects', 'general', 'ceph.general_objects', 'area'],
- 'lines': [
- ['general_objects', 'cluster', 'absolute']
- ]
- },
- 'general_bytes': {
- 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
- 'area'],
- 'lines': [
- ['general_read_bytes', 'read', 'absolute', 1, 1024],
- ['general_write_bytes', 'write', 'absolute', -1, 1024]
- ]
- },
- 'general_operations': {
- 'options': [None, 'Ceph General Read/Write Operations/s', 'operations', 'general', 'ceph.general_operations',
- 'area'],
- 'lines': [
- ['general_read_operations', 'read', 'absolute', 1],
- ['general_write_operations', 'write', 'absolute', -1]
- ]
- },
- 'general_latency': {
- 'options': [None, 'Ceph General Apply/Commit latency', 'milliseconds', 'general', 'ceph.general_latency',
- 'area'],
- 'lines': [
- ['general_apply_latency', 'apply', 'absolute'],
- ['general_commit_latency', 'commit', 'absolute']
- ]
- },
- 'pool_usage': {
- 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
- 'lines': []
- },
- 'pool_objects': {
- 'options': [None, 'Ceph Pools', 'objects', 'pool', 'ceph.pool_objects', 'line'],
- 'lines': []
- },
- 'pool_read_bytes': {
- 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
- 'lines': []
- },
- 'pool_write_bytes': {
- 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
- 'lines': []
- },
- 'pool_read_operations': {
- 'options': [None, 'Ceph Read Pool Operations/s', 'operations', 'pool', 'ceph.pool_read_operations', 'area'],
- 'lines': []
- },
- 'pool_write_operations': {
- 'options': [None, 'Ceph Write Pool Operations/s', 'operations', 'pool', 'ceph.pool_write_operations', 'area'],
- 'lines': []
- },
- 'osd_usage': {
- 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
- 'lines': []
- },
- 'osd_size': {
- 'options': [None, 'Ceph OSDs size', 'KiB', 'osd', 'ceph.osd_size', 'line'],
- 'lines': []
- },
- 'osd_apply_latency': {
- 'options': [None, 'Ceph OSDs apply latency', 'milliseconds', 'osd', 'ceph.apply_latency', 'line'],
- 'lines': []
- },
- 'osd_commit_latency': {
- 'options': [None, 'Ceph OSDs commit latency', 'milliseconds', 'osd', 'ceph.commit_latency', 'line'],
- 'lines': []
- }
-
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.config_file = self.configuration.get('config_file')
- self.keyring_file = self.configuration.get('keyring_file')
- self.rados_id = self.configuration.get('rados_id', 'admin')
-
- def check(self):
- """
- Checks module
- :return:
- """
- if not CEPH:
- self.error('rados module is needed to use ceph.chart.py')
- return False
- if not (self.config_file and self.keyring_file):
- self.error('config_file and/or keyring_file is not defined')
- return False
-
- # Verify files and permissions
- if not (os.access(self.config_file, os.F_OK)):
- self.error('{0} does not exist'.format(self.config_file))
- return False
- if not (os.access(self.keyring_file, os.F_OK)):
- self.error('{0} does not exist'.format(self.keyring_file))
- return False
- if not (os.access(self.config_file, os.R_OK)):
- self.error('Ceph plugin does not read {0}, define read permission.'.format(self.config_file))
- return False
- if not (os.access(self.keyring_file, os.R_OK)):
- self.error('Ceph plugin does not read {0}, define read permission.'.format(self.keyring_file))
- return False
- try:
- self.cluster = rados.Rados(conffile=self.config_file,
- conf=dict(keyring=self.keyring_file),
- rados_id=self.rados_id)
- self.cluster.connect()
- except rados.Error as error:
- self.error(error)
- return False
- self.create_definitions()
- return True
-
- def create_definitions(self):
- """
- Create dynamically charts options
- :return: None
- """
- # Pool lines
- for pool in sorted(self._get_df()['pools'], key=lambda x: sorted(x.keys())):
- self.definitions['pool_usage']['lines'].append([pool['name'],
- pool['name'],
- 'absolute'])
- self.definitions['pool_objects']['lines'].append(["obj_{0}".format(pool['name']),
- pool['name'],
- 'absolute'])
- self.definitions['pool_read_bytes']['lines'].append(['read_{0}'.format(pool['name']),
- pool['name'],
- 'absolute', 1, 1024])
- self.definitions['pool_write_bytes']['lines'].append(['write_{0}'.format(pool['name']),
- pool['name'],
- 'absolute', 1, 1024])
- self.definitions['pool_read_operations']['lines'].append(['read_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
- self.definitions['pool_write_operations']['lines'].append(['write_operations_{0}'.format(pool['name']),
- pool['name'],
- 'absolute'])
-
- # OSD lines
- for osd in sorted(self._get_osd_df()['nodes'], key=lambda x: sorted(x.keys())):
- self.definitions['osd_usage']['lines'].append([osd['name'],
- osd['name'],
- 'absolute'])
- self.definitions['osd_size']['lines'].append(['size_{0}'.format(osd['name']),
- osd['name'],
- 'absolute'])
- self.definitions['osd_apply_latency']['lines'].append(['apply_latency_{0}'.format(osd['name']),
- osd['name'],
- 'absolute'])
- self.definitions['osd_commit_latency']['lines'].append(['commit_latency_{0}'.format(osd['name']),
- osd['name'],
- 'absolute'])
-
- def get_data(self):
- """
- Catch all ceph data
- :return: dict
- """
- try:
- data = {}
- df = self._get_df()
- osd_df = self._get_osd_df()
- osd_perf = self._get_osd_perf()
- osd_perf_infos = get_osd_perf_infos(osd_perf)
- pool_stats = self._get_osd_pool_stats()
-
- data.update(self._get_general(osd_perf_infos, pool_stats))
- for pool in df['pools']:
- data.update(self._get_pool_usage(pool))
- data.update(self._get_pool_objects(pool))
- for pool_io in pool_stats:
- data.update(self._get_pool_rw(pool_io))
- for osd in osd_df['nodes']:
- data.update(self._get_osd_usage(osd))
- data.update(self._get_osd_size(osd))
- for osd_apply_commit in osd_perf_infos:
- data.update(self._get_osd_latency(osd_apply_commit))
- return data
- except (ValueError, AttributeError) as error:
- self.error(error)
- return None
-
- def _get_general(self, osd_perf_infos, pool_stats):
- """
- Get ceph's general usage
- :return: dict
- """
- status = self.cluster.get_cluster_stats()
- read_bytes_sec = 0
- write_bytes_sec = 0
- read_op_per_sec = 0
- write_op_per_sec = 0
- apply_latency = 0
- commit_latency = 0
-
- for pool_rw_io_b in pool_stats:
- read_bytes_sec += pool_rw_io_b['client_io_rate'].get('read_bytes_sec', 0)
- write_bytes_sec += pool_rw_io_b['client_io_rate'].get('write_bytes_sec', 0)
- read_op_per_sec += pool_rw_io_b['client_io_rate'].get('read_op_per_sec', 0)
- write_op_per_sec += pool_rw_io_b['client_io_rate'].get('write_op_per_sec', 0)
- for perf in osd_perf_infos:
- apply_latency += perf['perf_stats']['apply_latency_ms']
- commit_latency += perf['perf_stats']['commit_latency_ms']
-
- return {
- 'general_usage': int(status['kb_used']),
- 'general_available': int(status['kb_avail']),
- 'general_objects': int(status['num_objects']),
- 'general_read_bytes': read_bytes_sec,
- 'general_write_bytes': write_bytes_sec,
- 'general_read_operations': read_op_per_sec,
- 'general_write_operations': write_op_per_sec,
- 'general_apply_latency': apply_latency,
- 'general_commit_latency': commit_latency
- }
-
- @staticmethod
- def _get_pool_usage(pool):
- """
- Process raw data into pool usage dict information
- :return: A pool dict with pool name's key and usage bytes' value
- """
- return {pool['name']: pool['stats']['kb_used']}
-
- @staticmethod
- def _get_pool_objects(pool):
- """
- Process raw data into pool usage dict information
- :return: A pool dict with pool name's key and object numbers
- """
- return {'obj_{0}'.format(pool['name']): pool['stats']['objects']}
-
- @staticmethod
- def _get_pool_rw(pool):
- """
- Get read/write kb and operations in a pool
- :return: A pool dict with both read/write bytes and operations.
- """
- return {
- 'read_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_bytes_sec', 0)),
- 'write_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_bytes_sec', 0)),
- 'read_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('read_op_per_sec', 0)),
- 'write_operations_{0}'.format(pool['pool_name']): int(pool['client_io_rate'].get('write_op_per_sec', 0))
- }
-
- @staticmethod
- def _get_osd_usage(osd):
- """
- Process raw data into osd dict information to get osd usage
- :return: A osd dict with osd name's key and usage bytes' value
- """
- return {osd['name']: float(osd['kb_used'])}
-
- @staticmethod
- def _get_osd_size(osd):
- """
- Process raw data into osd dict information to get osd size (kb)
- :return: A osd dict with osd name's key and size bytes' value
- """
- return {'size_{0}'.format(osd['name']): float(osd['kb'])}
-
- @staticmethod
- def _get_osd_latency(osd):
- """
- Get ceph osd apply and commit latency
- :return: A osd dict with osd name's key with both apply and commit latency values
- """
- return {
- 'apply_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['apply_latency_ms'],
- 'commit_latency_osd.{0}'.format(osd['id']): osd['perf_stats']['commit_latency_ms']
- }
-
- def _get_df(self):
- """
- Get ceph df output
- :return: ceph df --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'df',
- 'format': 'json'
- }), b'')[1].decode('utf-8'))
-
- def _get_osd_df(self):
- """
- Get ceph osd df output
- :return: ceph osd df --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'osd df',
- 'format': 'json'
- }), b'')[1].decode('utf-8').replace('-nan', '"-nan"'))
-
- def _get_osd_perf(self):
- """
- Get ceph osd performance
- :return: ceph osd perf --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'osd perf',
- 'format': 'json'
- }), b'')[1].decode('utf-8'))
-
- def _get_osd_pool_stats(self):
- """
- Get ceph osd pool status.
- This command is used to get information about both
- read/write operation and bytes per second on each pool
- :return: ceph osd pool stats --format json
- """
- return json.loads(self.cluster.mon_command(json.dumps({
- 'prefix': 'osd pool stats',
- 'format': 'json'
- }), b'')[1].decode('utf-8'))
-
-
-def get_osd_perf_infos(osd_perf):
- # https://github.com/netdata/netdata/issues/8247
-# module uses 'osd_perf_infos' data, it's been moved under 'osdstats' since Ceph v14.2
- if 'osd_perf_infos' in osd_perf:
- return osd_perf['osd_perf_infos']
- return osd_perf['osdstats']['osd_perf_infos']
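All of the `_get_*()` helpers above follow the same `rados` pattern: send a JSON monitor command and parse the JSON reply. Below is a minimal standalone sketch, assuming the default config/keyring paths and `rados_id` from the `ceph.conf` shown further down, and assuming `mon_command()` returns a `(return_code, output, status)` tuple, consistent with the `[1]` indexing used above:

```python
import json

import rados

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf',
                      conf=dict(keyring='/etc/ceph/ceph.client.admin.keyring'),
                      rados_id='admin')
cluster.connect()

# equivalent of _get_df(): ask the monitors for `ceph df --format json`
ret, outbuf, outs = cluster.mon_command(json.dumps({'prefix': 'df', 'format': 'json'}), b'')
df = json.loads(outbuf.decode('utf-8'))
print([pool['name'] for pool in df['pools']])  # pool names, as used by create_definitions()

cluster.shutdown()
```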
diff --git a/src/collectors/python.d.plugin/ceph/ceph.conf b/src/collectors/python.d.plugin/ceph/ceph.conf
deleted file mode 100644
index 81788e866..000000000
--- a/src/collectors/python.d.plugin/ceph/ceph.conf
+++ /dev/null
@@ -1,75 +0,0 @@
-# netdata python.d.plugin configuration for ceph stats
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 10 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, ceph plugin also supports the following:
-#
-# config_file: 'config_file' # Ceph config file.
-# keyring_file: 'keyring_file' # Ceph keyring file. The netdata user must be added to the ceph group
-# # and the keyring file must have group read permission.
-# rados_id: 'rados username' # ID used to connect to the ceph cluster. Allows
-# # creating a read-only key for pulling data instead of using the admin key.
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-config_file: '/etc/ceph/ceph.conf'
-keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-rados_id: 'admin'
diff --git a/src/collectors/python.d.plugin/ceph/integrations/ceph.md b/src/collectors/python.d.plugin/ceph/integrations/ceph.md
deleted file mode 100644
index d2584a4d0..000000000
--- a/src/collectors/python.d.plugin/ceph/integrations/ceph.md
+++ /dev/null
@@ -1,228 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/ceph/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/ceph/metadata.yaml"
-sidebar_label: "Ceph"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Ceph
-
-
-<img src="https://netdata.cloud/img/ceph.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: ceph
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Ceph metrics: cluster statistics, OSD usage and latency, and pool statistics.
-
-Uses the `rados` python module to connect to a Ceph cluster.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Ceph instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ceph.general_usage | avail, used | KiB |
-| ceph.general_objects | cluster | objects |
-| ceph.general_bytes | read, write | KiB/s |
-| ceph.general_operations | read, write | operations |
-| ceph.general_latency | apply, commit | milliseconds |
-| ceph.pool_usage | a dimension per Ceph Pool | KiB |
-| ceph.pool_objects | a dimension per Ceph Pool | objects |
-| ceph.pool_read_bytes | a dimension per Ceph Pool | KiB/s |
-| ceph.pool_write_bytes | a dimension per Ceph Pool | KiB/s |
-| ceph.pool_read_operations | a dimension per Ceph Pool | operations |
-| ceph.pool_write_operations | a dimension per Ceph Pool | operations |
-| ceph.osd_usage | a dimension per Ceph OSD | KiB |
-| ceph.osd_size | a dimension per Ceph OSD | KiB |
-| ceph.apply_latency | a dimension per Ceph OSD | milliseconds |
-| ceph.commit_latency | a dimension per Ceph OSD | milliseconds |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ ceph_cluster_space_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf) | ceph.general_usage | cluster disk space utilization |
-
-
-## Setup
-
-### Prerequisites
-
-#### `rados` python module
-
-Make sure the `rados` python module is installed
-
-#### Granting the ceph group read permission on the keyring file
-
-Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`
-
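-For the collector to actually read the keyring, the netdata user also needs to be in the group that owns it. A minimal sketch, assuming the keyring is group-owned by `ceph`:
-
-```bash
-sudo chgrp ceph /etc/ceph/ceph.client.admin.keyring
-sudo chmod 640 /etc/ceph/ceph.client.admin.keyring
-sudo usermod -a -G ceph netdata
-```
-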
-#### Create a specific rados_id
-
-You can optionally create a dedicated `rados_id` to use instead of `admin`.
-
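-For example, you could create a read-only client with the `ceph auth` CLI (the client name and capabilities below are illustrative, adapt them to your cluster):
-
-```bash
-ceph auth get-or-create client.netdata mon 'allow r' -o /etc/ceph/ceph.client.netdata.keyring
-```
-
-You would then point `keyring_file` at the new keyring and set `rados_id: 'netdata'` in the job configuration.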
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/ceph.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/ceph.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| config_file | Ceph config file | | yes |
-| keyring_file | Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must be readable by that group. | | yes |
-| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |
-
-</details>
-
-#### Examples
-
-##### Basic local Ceph cluster
-
-A basic configuration to connect to a local Ceph cluster.
-
-```yaml
-local:
- config_file: '/etc/ceph/ceph.conf'
- keyring_file: '/etc/ceph/ceph.client.admin.keyring'
-
-```
-
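-If you created a dedicated `rados_id` (see the prerequisites above), a job could look like this (keyring path and id are illustrative):
-
-```yaml
-local:
- config_file: '/etc/ceph/ceph.conf'
- keyring_file: '/etc/ceph/ceph.client.netdata.keyring'
- rados_id: 'netdata'
-
-```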
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin ceph debug trace
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `ceph` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep ceph
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
-
-```bash
-grep ceph /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep ceph
-```
-
-
diff --git a/src/collectors/python.d.plugin/ceph/metadata.yaml b/src/collectors/python.d.plugin/ceph/metadata.yaml
deleted file mode 100644
index 642941137..000000000
--- a/src/collectors/python.d.plugin/ceph/metadata.yaml
+++ /dev/null
@@ -1,223 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: ceph
- monitored_instance:
- name: Ceph
- link: 'https://ceph.io/'
- categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: 'ceph.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - ceph
- - storage
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors Ceph metrics about Cluster statistics, OSD usage, latency and Pool statistics.'
- method_description: 'Uses the `rados` python module to connect to a Ceph cluster.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: '`rados` python module'
- description: 'Make sure the `rados` python module is installed'
- - title: 'Grant the ceph group read permission on the keyring file'
- description: 'Execute: `chmod 640 /etc/ceph/ceph.client.admin.keyring`'
- - title: 'Create a specific rados_id'
- description: 'You can optionally create a dedicated `rados_id` to use instead of `admin`.'
- configuration:
- file:
- name: python.d/ceph.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: config_file
- description: Ceph config file
- default_value: ''
- required: true
- - name: keyring_file
- description: Ceph keyring file. The netdata user must be added to the ceph group, and the keyring file must be readable by that group.
- default_value: ''
- required: true
- - name: rados_id
- description: A rados user id to use for connecting to the Ceph cluster.
- default_value: 'admin'
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic local Ceph cluster
- description: A basic configuration to connect to a local Ceph cluster.
- folding:
- enabled: false
- config: |
- local:
- config_file: '/etc/ceph/ceph.conf'
- keyring_file: '/etc/ceph/ceph.client.admin.keyring'
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: ceph_cluster_space_usage
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ceph.conf
- metric: ceph.general_usage
- info: cluster disk space utilization
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: ceph.general_usage
- description: Ceph General Space
- unit: "KiB"
- chart_type: stacked
- dimensions:
- - name: avail
- - name: used
- - name: ceph.general_objects
- description: Ceph General Objects
- unit: "objects"
- chart_type: area
- dimensions:
- - name: cluster
- - name: ceph.general_bytes
- description: Ceph General Read/Write Data/s
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: ceph.general_operations
- description: Ceph General Read/Write Operations/s
- unit: "operations"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: ceph.general_latency
- description: Ceph General Apply/Commit latency
- unit: "milliseconds"
- chart_type: area
- dimensions:
- - name: apply
- - name: commit
- - name: ceph.pool_usage
- description: Ceph Pools
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_objects
- description: Ceph Pools
- unit: "objects"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_read_bytes
- description: Ceph Read Pool Data/s
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_write_bytes
- description: Ceph Write Pool Data/s
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_read_operations
- description: Ceph Read Pool Operations/s
- unit: "operations"
- chart_type: area
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.pool_write_operations
- description: Ceph Write Pool Operations/s
- unit: "operations"
- chart_type: area
- dimensions:
- - name: a dimension per Ceph Pool
- - name: ceph.osd_usage
- description: Ceph OSDs
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph OSD
- - name: ceph.osd_size
- description: Ceph OSDs size
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph OSD
- - name: ceph.apply_latency
- description: Ceph OSDs apply latency
- unit: "milliseconds"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph OSD
- - name: ceph.commit_latency
- description: Ceph OSDs commit latency
- unit: "milliseconds"
- chart_type: line
- dimensions:
- - name: a dimension per Ceph OSD
diff --git a/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md b/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
index 8f086765e..103058db8 100644
--- a/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
+++ b/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
@@ -84,14 +84,14 @@ There are no alerts configured by default for this integration.
#### Enable the go_expvar collector
-The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
sudo ./edit-config python.d.conf
```
-Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
+Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/start-stop-restart.md) for your system.
#### Sample `expvar` usage in a Go application
@@ -171,8 +171,8 @@ number of currently running Goroutines and updates these stats every second.
The configuration file name for this integration is `python.d/go_expvar.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -231,8 +231,8 @@ See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-28449
Please see these two links to the official Netdata documentation for more information about the values:
-- [External plugins - charts](/src/collectors/plugins.d/README.md#chart)
-- [Chart variables](/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
+- [External plugins - charts](https://github.com/netdata/netdata/blob/master/src/plugins.d/README.md#chart)
+- [Chart variables](https://github.com/netdata/netdata/blob/master/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
**Line definitions**
@@ -255,7 +255,7 @@ hidden: False
```
Please see the following link for more information about the options and their default values:
-[External plugins - dimensions](/src/collectors/plugins.d/README.md#dimension)
+[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/src/plugins.d/README.md#dimension)
Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;
All dicts in the resulting JSON document are then flattened to one level.
diff --git a/src/collectors/python.d.plugin/go_expvar/metadata.yaml b/src/collectors/python.d.plugin/go_expvar/metadata.yaml
index aa45968ff..b91225e9a 100644
--- a/src/collectors/python.d.plugin/go_expvar/metadata.yaml
+++ b/src/collectors/python.d.plugin/go_expvar/metadata.yaml
@@ -48,7 +48,7 @@ modules:
sudo ./edit-config python.d.conf
```
- Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
+ Change the value of the `go_expvar` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your system.
- title: "Sample `expvar` usage in a Go application"
description: |
The `expvar` package exposes metrics over HTTP and is very easy to use.
@@ -200,7 +200,7 @@ modules:
Please see these two links to the official Netdata documentation for more information about the values:
- - [External plugins - charts](/src/collectors/plugins.d/README.md#chart)
+ - [External plugins - charts](/src/plugins.d/README.md#chart)
- [Chart variables](/src/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
**Line definitions**
@@ -224,7 +224,7 @@ modules:
```
Please see the following link for more information about the options and their default values:
- [External plugins - dimensions](/src/collectors/plugins.d/README.md#dimension)
+ [External plugins - dimensions](/src/plugins.d/README.md#dimension)
Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;
All dicts in the resulting JSON document are then flattened to one level.
diff --git a/src/collectors/python.d.plugin/haproxy/README.md b/src/collectors/python.d.plugin/haproxy/README.md
index 8ade512bb..bc54d8638 100644
--- a/src/collectors/python.d.plugin/haproxy/README.md
+++ b/src/collectors/python.d.plugin/haproxy/README.md
@@ -1,12 +1,3 @@
-<!--
-title: "HAProxy monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/haproxy/README.md"
-sidebar_label: "haproxy-python.d.plugin"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Webapps"
--->
-
# HAProxy collector
Monitors frontend and backend metrics such as bytes in, bytes out, sessions current, sessions in queue current.
diff --git a/src/collectors/python.d.plugin/openldap/README.md b/src/collectors/python.d.plugin/openldap/README.md
deleted file mode 120000
index 45f36b9b9..000000000
--- a/src/collectors/python.d.plugin/openldap/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/openldap.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/openldap/integrations/openldap.md b/src/collectors/python.d.plugin/openldap/integrations/openldap.md
deleted file mode 100644
index 3f363343a..000000000
--- a/src/collectors/python.d.plugin/openldap/integrations/openldap.md
+++ /dev/null
@@ -1,249 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/openldap/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/openldap/metadata.yaml"
-sidebar_label: "OpenLDAP"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Authentication and Authorization"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# OpenLDAP
-
-
-<img src="https://netdata.cloud/img/statsd.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: openldap
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors OpenLDAP metrics about connections, operations, referrals and more.
-
-Statistics are taken from the monitoring interface of an OpenLDAP (slapd) server.
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This collector doesn't work until all of its prerequisites have been met.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per OpenLDAP instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| openldap.total_connections | connections | connections/s |
-| openldap.traffic_stats | sent | KiB/s |
-| openldap.operations_status | completed, initiated | ops/s |
-| openldap.referrals | sent | referrals/s |
-| openldap.entries | sent | entries/s |
-| openldap.ldap_operations | bind, search, unbind, add, delete, modify, compare | ops/s |
-| openldap.waiters | write, read | waiters/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Configure the openLDAP server to expose metrics to monitor it.
-
-Follow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.
-
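-The exact steps depend on whether slapd is configured via `slapd.conf` or `cn=config`. As a rough sketch for a `slapd.conf` deployment (module name, DNs and the ACL below are assumptions to adapt), enabling the `cn=Monitor` backend looks roughly like:
-
-```text
-moduleload  back_monitor
-
-database    monitor
-access to dn.subtree="cn=Monitor"
-    by dn.exact="cn=netdata,dc=example,dc=com" read
-    by * none
-```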
-
-#### Install python-ldap module
-
-Install the python-ldap module
-
-1. From pip package manager
-
-```bash
-pip install python-ldap
-```
-
-2. With apt package manager (in most deb based distros)
-
-
-```bash
-apt-get install python-ldap
-```
-
-
-3. With yum package manager (in most rpm based distros)
-
-
-```bash
-yum install python-ldap
-```
-
-
-#### Insert credentials for Netdata to access openLDAP server
-
-Use the `ldappasswd` utility to set a password for the username you will use.
-
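-For example (the DNs here are placeholders for illustration), to set the password of an existing entry while binding as the admin user:
-
-```bash
-ldappasswd -H ldap://localhost -x -D "cn=admin,dc=example,dc=com" -W -S "cn=netdata,dc=example,dc=com"
-```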
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/openldap.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/openldap.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| username | The bind user with the right to access monitor statistics | | yes |
-| password | The password for the bind user | | yes |
-| server | The listening address of the LDAP server. In case of TLS, use the hostname the certificate is published for. | | yes |
-| port | The listening port of the LDAP server. Change to port 636 for a TLS connection. | 389 | yes |
-| use_tls | Set to True if a TLS connection is used over ldaps:// | no | no |
-| use_start_tls | Set to True if STARTTLS is used over ldap:// | no | no |
-| cert_check | Set to False to skip certificate verification | True | yes |
-| timeout | Seconds to wait before timing out if no connection exists | | yes |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-username: "cn=admin"
-password: "pass"
-server: "localhost"
-port: "389"
-cert_check: True
-timeout: 1
-
-```
-
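-A TLS variant of the same job, sketched from the options table above (hostname and credentials are placeholders):
-
-```yaml
-username: "cn=netdata,dc=example,dc=com"
-password: "pass"
-server: "ldap.example.com"
-port: "636"
-use_tls: True
-cert_check: True
-timeout: 1
-
-```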
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin openldap debug trace
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `openldap` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep openldap
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
-
-```bash
-grep openldap /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep openldap
-```
-
-
diff --git a/src/collectors/python.d.plugin/openldap/metadata.yaml b/src/collectors/python.d.plugin/openldap/metadata.yaml
deleted file mode 100644
index 3826b22c7..000000000
--- a/src/collectors/python.d.plugin/openldap/metadata.yaml
+++ /dev/null
@@ -1,225 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: openldap
- monitored_instance:
- name: OpenLDAP
- link: "https://www.openldap.org/"
- categories:
- - data-collection.authentication-and-authorization
- icon_filename: "statsd.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - openldap
- - RBAC
- - Directory access
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors OpenLDAP metrics about connections, operations, referrals and more."
- method_description: |
- Statistics are taken from the monitoring interface of an OpenLDAP (slapd) server.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: |
- This collector doesn't work until all of its prerequisites have been met.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Configure the openLDAP server to expose metrics to monitor it.
- description: |
- Follow the instructions at https://www.openldap.org/doc/admin24/monitoringslapd.html to activate the monitoring interface.
- - title: Install python-ldap module
- description: |
- Install the python-ldap module
-
- 1. From pip package manager
-
- ```bash
- pip install python-ldap
- ```
-
- 2. With apt package manager (in most deb based distros)
-
-
- ```bash
- apt-get install python-ldap
- ```
-
-
- 3. With yum package manager (in most rpm based distros)
-
-
- ```bash
- yum install python-ldap
- ```
- - title: Insert credentials for Netdata to access openLDAP server
- description: |
- Use the `ldappasswd` utility to set a password for the username you will use.
- configuration:
- file:
- name: "python.d/openldap.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: username
- description: The bind user with the right to access monitor statistics
- default_value: ""
- required: true
- - name: password
- description: The password for the bind user
- default_value: ""
- required: true
- - name: server
- description: The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for.
- default_value: ""
- required: true
- - name: port
- description: The listening port of the LDAP server. Change to port 636 for a TLS connection.
- default_value: "389"
- required: true
- - name: use_tls
- description: Set to True if a TLS connection is used over ldaps://
- default_value: False
- required: false
- - name: use_start_tls
- description: Set to True if STARTTLS is used over ldap://
- default_value: False
- required: false
- - name: cert_check
- description: Set to False to skip certificate verification
- default_value: "True"
- required: true
- - name: timeout
- description: Seconds to wait before timing out if no connection exists
- default_value: ""
- required: true
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic example configuration.
- folding:
- enabled: false
- config: |
- username: "cn=admin"
- password: "pass"
- server: "localhost"
- port: "389"
- cert_check: True
- timeout: 1
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: openldap.total_connections
- description: Total Connections
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connections
- - name: openldap.traffic_stats
- description: Traffic
- unit: "KiB/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: openldap.operations_status
- description: Operations Status
- unit: "ops/s"
- chart_type: line
- dimensions:
- - name: completed
- - name: initiated
- - name: openldap.referrals
- description: Referrals
- unit: "referrals/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: openldap.entries
- description: Entries
- unit: "entries/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: openldap.ldap_operations
- description: Operations
- unit: "ops/s"
- chart_type: line
- dimensions:
- - name: bind
- - name: search
- - name: unbind
- - name: add
- - name: delete
- - name: modify
- - name: compare
- - name: openldap.waiters
- description: Waiters
- unit: "waiters/s"
- chart_type: line
- dimensions:
- - name: write
- - name: read
diff --git a/src/collectors/python.d.plugin/openldap/openldap.chart.py b/src/collectors/python.d.plugin/openldap/openldap.chart.py
deleted file mode 100644
index aba143954..000000000
--- a/src/collectors/python.d.plugin/openldap/openldap.chart.py
+++ /dev/null
@@ -1,216 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: openldap netdata python.d module
-# Author: Manolis Kartsonakis (ekartsonakis)
-# SPDX-License-Identifier: GPL-3.0+
-
-try:
- import ldap
-
- HAS_LDAP = True
-except ImportError:
- HAS_LDAP = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-DEFAULT_SERVER = 'localhost'
-DEFAULT_PORT = '389'
-DEFAULT_TLS = False
-DEFAULT_CERT_CHECK = True
-DEFAULT_TIMEOUT = 1
-DEFAULT_START_TLS = False
-
-ORDER = [
- 'total_connections',
- 'bytes_sent',
- 'operations',
- 'referrals_sent',
- 'entries_sent',
- 'ldap_operations',
- 'waiters'
-]
-
-CHARTS = {
- 'total_connections': {
- 'options': [None, 'Total Connections', 'connections/s', 'ldap', 'openldap.total_connections', 'line'],
- 'lines': [
- ['total_connections', 'connections', 'incremental']
- ]
- },
- 'bytes_sent': {
- 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'],
- 'lines': [
- ['bytes_sent', 'sent', 'incremental', 1, 1024]
- ]
- },
- 'operations': {
- 'options': [None, 'Operations Status', 'ops/s', 'ldap', 'openldap.operations_status', 'line'],
- 'lines': [
- ['completed_operations', 'completed', 'incremental'],
- ['initiated_operations', 'initiated', 'incremental']
- ]
- },
- 'referrals_sent': {
- 'options': [None, 'Referrals', 'referrals/s', 'ldap', 'openldap.referrals', 'line'],
- 'lines': [
- ['referrals_sent', 'sent', 'incremental']
- ]
- },
- 'entries_sent': {
- 'options': [None, 'Entries', 'entries/s', 'ldap', 'openldap.entries', 'line'],
- 'lines': [
- ['entries_sent', 'sent', 'incremental']
- ]
- },
- 'ldap_operations': {
- 'options': [None, 'Operations', 'ops/s', 'ldap', 'openldap.ldap_operations', 'line'],
- 'lines': [
- ['bind_operations', 'bind', 'incremental'],
- ['search_operations', 'search', 'incremental'],
- ['unbind_operations', 'unbind', 'incremental'],
- ['add_operations', 'add', 'incremental'],
- ['delete_operations', 'delete', 'incremental'],
- ['modify_operations', 'modify', 'incremental'],
- ['compare_operations', 'compare', 'incremental']
- ]
- },
- 'waiters': {
- 'options': [None, 'Waiters', 'waiters/s', 'ldap', 'openldap.waiters', 'line'],
- 'lines': [
- ['write_waiters', 'write', 'incremental'],
- ['read_waiters', 'read', 'incremental']
- ]
- },
-}
-
-# Stuff to gather - tuples of (DN, attribute) to query
-SEARCH_LIST = {
- 'total_connections': (
- 'cn=Total,cn=Connections,cn=Monitor', 'monitorCounter',
- ),
- 'bytes_sent': (
- 'cn=Bytes,cn=Statistics,cn=Monitor', 'monitorCounter',
- ),
- 'completed_operations': (
- 'cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'initiated_operations': (
- 'cn=Operations,cn=Monitor', 'monitorOpInitiated',
- ),
- 'referrals_sent': (
- 'cn=Referrals,cn=Statistics,cn=Monitor', 'monitorCounter',
- ),
- 'entries_sent': (
- 'cn=Entries,cn=Statistics,cn=Monitor', 'monitorCounter',
- ),
- 'bind_operations': (
- 'cn=Bind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'unbind_operations': (
- 'cn=Unbind,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'add_operations': (
- 'cn=Add,cn=Operations,cn=Monitor', 'monitorOpInitiated',
- ),
- 'delete_operations': (
- 'cn=Delete,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'modify_operations': (
- 'cn=Modify,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'compare_operations': (
- 'cn=Compare,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'search_operations': (
- 'cn=Search,cn=Operations,cn=Monitor', 'monitorOpCompleted',
- ),
- 'write_waiters': (
- 'cn=Write,cn=Waiters,cn=Monitor', 'monitorCounter',
- ),
- 'read_waiters': (
- 'cn=Read,cn=Waiters,cn=Monitor', 'monitorCounter',
- ),
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.server = configuration.get('server', DEFAULT_SERVER)
- self.port = configuration.get('port', DEFAULT_PORT)
- self.username = configuration.get('username')
- self.password = configuration.get('password')
- self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
- self.use_tls = configuration.get('use_tls', DEFAULT_TLS)
- self.cert_check = configuration.get('cert_check', DEFAULT_CERT_CHECK)
- self.use_start_tls = configuration.get('use_start_tls', DEFAULT_START_TLS)
- self.alive = False
- self.conn = None
-
- def disconnect(self):
- if self.conn:
- self.conn.unbind()
- self.conn = None
- self.alive = False
-
- def connect(self):
- try:
- if self.use_tls:
- self.conn = ldap.initialize('ldaps://%s:%s' % (self.server, self.port))
- else:
- self.conn = ldap.initialize('ldap://%s:%s' % (self.server, self.port))
- self.conn.set_option(ldap.OPT_NETWORK_TIMEOUT, self.timeout)
- if (self.use_tls or self.use_start_tls) and not self.cert_check:
- self.conn.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
- if self.use_start_tls or self.use_tls:
- self.conn.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
- if self.use_start_tls:
- self.conn.protocol_version = ldap.VERSION3
- self.conn.start_tls_s()
- if self.username and self.password:
- self.conn.simple_bind(self.username, self.password)
- except ldap.LDAPError as error:
- self.error(error)
- return False
-
- self.alive = True
- return True
-
- def reconnect(self):
- self.disconnect()
- return self.connect()
-
- def check(self):
- if not HAS_LDAP:
- self.error("'python-ldap' package is needed")
- return None
-
- return self.connect() and self.get_data()
-
- def get_data(self):
- if not self.alive and not self.reconnect():
- return None
-
- data = dict()
- for key in SEARCH_LIST:
- dn = SEARCH_LIST[key][0]
- attr = SEARCH_LIST[key][1]
- try:
- num = self.conn.search(dn, ldap.SCOPE_BASE, 'objectClass=*', [attr, ])
- result_type, result_data = self.conn.result(num, 1)
- except ldap.LDAPError as error:
- self.error("Empty result. Check bind username/password. Message: ", error)
- self.alive = False
- return None
-
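- # 101 is ldap.RES_SEARCH_RESULT; skip any other message type for this key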
- if result_type != 101:
- continue
-
- try:
- data[key] = int(list(result_data[0][1].values())[0][0])
- except (ValueError, IndexError) as error:
- self.debug(error)
- continue
-
- return data
diff --git a/src/collectors/python.d.plugin/openldap/openldap.conf b/src/collectors/python.d.plugin/openldap/openldap.conf
deleted file mode 100644
index 5fd99a525..000000000
--- a/src/collectors/python.d.plugin/openldap/openldap.conf
+++ /dev/null
@@ -1,75 +0,0 @@
-# netdata python.d.plugin configuration for openldap
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# query the LDAP monitoring interface once every 10 seconds
-update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# ----------------------------------------------------------------------
-# OPENLDAP EXTRA PARAMETERS
-
-# Set here your LDAP connection settings
-
-#username : "cn=admin,dc=example,dc=com" # The bind user with the right to access monitor statistics
-#password : "yourpass" # The password for the bind user
-#server : 'localhost' # The listening address of the LDAP server. In case of TLS, use the hostname the certificate is published for.
-#port : 389 # The listening port of the LDAP server. Change to port 636 for a TLS connection
-#use_tls : False # Set to True if a TLS connection is used over ldaps://
-#use_start_tls: False # Set to True if STARTTLS is used over ldap://
-#cert_check : True # Set to False to skip certificate verification
-#timeout : 1 # Seconds to wait before timing out if no connection exists
diff --git a/src/collectors/python.d.plugin/oracledb/README.md b/src/collectors/python.d.plugin/oracledb/README.md
deleted file mode 120000
index a75e3611e..000000000
--- a/src/collectors/python.d.plugin/oracledb/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/oracle_db.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md b/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
deleted file mode 100644
index 4cf1b54a4..000000000
--- a/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
+++ /dev/null
@@ -1,260 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/oracledb/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/oracledb/metadata.yaml"
-sidebar_label: "Oracle DB"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Databases"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Oracle DB
-
-
-<img src="https://netdata.cloud/img/oracle.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: oracledb
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors OracleDB database metrics about sessions, tables, memory and more.
-
-It collects the metrics via the supported database client library
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-In order for this collector to work, it needs a read-only user `netdata` in the RDBMS.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-When the requirements are met, databases on the local host on port 1521 will be auto-detected
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-These metrics refer to the entire monitored application.
-
-### Per Oracle DB instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| oracledb.session_count | total, active | sessions |
-| oracledb.session_limit_usage | usage | % |
-| oracledb.logons | logons | events/s |
-| oracledb.physical_disk_read_writes | reads, writes | events/s |
-| oracledb.sorts_on_disks | sorts | events/s |
-| oracledb.full_table_scans | full table scans | events/s |
-| oracledb.database_wait_time_ratio | wait time ratio | % |
-| oracledb.shared_pool_free_memory | free memory | % |
-| oracledb.in_memory_sorts_ratio | in-memory sorts | % |
-| oracledb.sql_service_response_time | time | seconds |
-| oracledb.user_rollbacks | rollbacks | events/s |
-| oracledb.enqueue_timeouts | enqueue timeouts | events/s |
-| oracledb.cache_hit_ration | buffer, cursor, library, row | % |
-| oracledb.global_cache_blocks | corrupted, lost | events/s |
-| oracledb.activity | parse count, execute count, user commits, user rollbacks | events/s |
-| oracledb.wait_time | application, configuration, administrative, concurrency, commit, network, user I/O, system I/O, scheduler, other | ms |
-| oracledb.tablespace_size | a dimension per active tablespace | KiB |
-| oracledb.tablespace_usage | a dimension per active tablespace | KiB |
-| oracledb.tablespace_usage_in_percent | a dimension per active tablespace | % |
-| oracledb.allocated_size | a dimension per active tablespace | B |
-| oracledb.allocated_usage | a dimension per active tablespace | B |
-| oracledb.allocated_usage_in_percent | a dimension per active tablespace | % |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Install the python-oracledb package
-
-You can follow the official guide below to install the required package:
-
-Source: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html
-
-
-#### Create a read only user for netdata
-
-Follow the official instructions for your Oracle RDBMS to create a read-only user for netdata. The operation may follow this approach:
-
-Connect to your Oracle database with an administrative user and execute:
-
-```sql
-CREATE USER netdata IDENTIFIED BY <PASSWORD>;
-
-GRANT CONNECT TO netdata;
-GRANT SELECT_CATALOG_ROLE TO netdata;
-```
-
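-One possible way to run these statements is with `sqlplus` as an administrative user (the connection details below are placeholders):
-
-```bash
-sqlplus sys/<PASSWORD>@//localhost:1521/XE as sysdba
-```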
-
-#### Edit the configuration
-
-Edit the configuration to:
-
-1. Provide a valid user for the netdata collector to access the database.
-2. Specify the network address and port the database is listening on.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/oracledb.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/oracledb.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| user | The username for the user account. | no | yes |
-| password | The password for the user account. | no | yes |
-| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |
-| service | The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |
-| protocol | One of the strings "tcp" or "tcps", indicating whether to use unencrypted or encrypted (TLS) network traffic. | no | yes |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration, two jobs described for two databases.
-
-```yaml
-local:
- user: 'netdata'
- password: 'secret'
- server: 'localhost:1521'
- service: 'XE'
- protocol: 'tcps'
-
-remote:
- user: 'netdata'
- password: 'secret'
- server: '10.0.0.1:1521'
- service: 'XE'
- protocol: 'tcps'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin oracledb debug trace
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `oracledb` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep oracledb
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
-
-```bash
-grep oracledb /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep oracledb
-```
-
-
diff --git a/src/collectors/python.d.plugin/oracledb/metadata.yaml b/src/collectors/python.d.plugin/oracledb/metadata.yaml
deleted file mode 100644
index f2ab8312b..000000000
--- a/src/collectors/python.d.plugin/oracledb/metadata.yaml
+++ /dev/null
@@ -1,309 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: oracledb
- monitored_instance:
- name: Oracle DB
- link: "https://docs.oracle.com/en/database/oracle/oracle-database/"
- categories:
- - data-collection.database-servers
- icon_filename: "oracle.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - database
- - oracle
- - data warehouse
- - SQL
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors OracleDB database metrics about sessions, tables, memory and more."
- method_description: "It collects the metrics via the supported database client library"
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: |
- In order for this collector to work, it needs a read-only user `netdata` in the RDBMS.
- default_behavior:
- auto_detection:
- description: "When the requirements are met, databases on the local host on port 1521 will be auto-detected"
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Install the python-oracledb package
- description: |
- You can follow the official guide below to install the required package:
-
- Source: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html
- - title: Create a read only user for netdata
- description: |
- Follow the official instructions for your Oracle RDBMS to create a read-only user for netdata. The operation may follow this approach:
-
- Connect to your Oracle database with an administrative user and execute:
-
- ```sql
- CREATE USER netdata IDENTIFIED BY <PASSWORD>;
-
- GRANT CONNECT TO netdata;
- GRANT SELECT_CATALOG_ROLE TO netdata;
- ```
- - title: Edit the configuration
- description: |
- Edit the configuration to:
-
- 1. Provide a valid user for the netdata collector to access the database.
- 2. Specify the network address and port the database is listening on.
- configuration:
- file:
- name: "python.d/oracledb.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: user
- description: The username for the user account.
- default_value: no
- required: true
- - name: password
- description: The password for the user account.
- default_value: no
- required: true
- - name: server
- description: The IP address or hostname (and port) of the Oracle Database Server.
- default_value: no
- required: true
- - name: service
- description: The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`.
- default_value: no
- required: true
- - name: protocol
- description: One of the strings "tcp" or "tcps", indicating whether to use unencrypted or encrypted (TLS) network traffic.
- default_value: no
- required: true
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration, two jobs described for two databases.
- config: |
- local:
- user: 'netdata'
- password: 'secret'
- server: 'localhost:1521'
- service: 'XE'
- protocol: 'tcps'
-
- remote:
- user: 'netdata'
- password: 'secret'
- server: '10.0.0.1:1521'
- service: 'XE'
- protocol: 'tcps'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: "These metrics refer to the entire monitored application."
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: oracledb.session_count
- description: Session Count
- unit: "sessions"
- chart_type: line
- dimensions:
- - name: total
- - name: active
- - name: oracledb.session_limit_usage
- description: Session Limit Usage
- unit: "%"
- chart_type: area
- dimensions:
- - name: usage
- - name: oracledb.logons
- description: Logons
- unit: "events/s"
- chart_type: area
- dimensions:
- - name: logons
- - name: oracledb.physical_disk_read_writes
- description: Physical Disk Reads/Writes
- unit: "events/s"
- chart_type: area
- dimensions:
- - name: reads
- - name: writes
- - name: oracledb.sorts_on_disks
- description: Sorts On Disk
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: sorts
- - name: oracledb.full_table_scans
- description: Full Table Scans
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: full table scans
- - name: oracledb.database_wait_time_ratio
- description: Database Wait Time Ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: wait time ratio
- - name: oracledb.shared_pool_free_memory
- description: Shared Pool Free Memory
- unit: "%"
- chart_type: line
- dimensions:
- - name: free memory
- - name: oracledb.in_memory_sorts_ratio
- description: In-Memory Sorts Ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: in-memory sorts
- - name: oracledb.sql_service_response_time
- description: SQL Service Response Time
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: time
- - name: oracledb.user_rollbacks
- description: User Rollbacks
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: rollbacks
- - name: oracledb.enqueue_timeouts
- description: Enqueue Timeouts
- unit: "events/s"
- chart_type: line
- dimensions:
- - name: enqueue timeouts
- - name: oracledb.cache_hit_ration
- description: Cache Hit Ratio
- unit: "%"
- chart_type: stacked
- dimensions:
- - name: buffer
- - name: cursor
- - name: library
- - name: row
- - name: oracledb.global_cache_blocks
- description: Global Cache Blocks Events
- unit: "events/s"
- chart_type: area
- dimensions:
- - name: corrupted
- - name: lost
- - name: oracledb.activity
- description: Activities
- unit: "events/s"
- chart_type: stacked
- dimensions:
- - name: parse count
- - name: execute count
- - name: user commits
- - name: user rollbacks
- - name: oracledb.wait_time
- description: Wait Time
- unit: "ms"
- chart_type: stacked
- dimensions:
- - name: application
- - name: configuration
- - name: administrative
- - name: concurrency
- - name: commit
- - name: network
- - name: user I/O
- - name: system I/O
- - name: scheduler
- - name: other
- - name: oracledb.tablespace_size
- description: Size
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.tablespace_usage
- description: Usage
- unit: "KiB"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.tablespace_usage_in_percent
- description: Usage
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.allocated_size
- description: Size
- unit: "B"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.allocated_usage
- description: Usage
- unit: "B"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
- - name: oracledb.allocated_usage_in_percent
- description: Usage
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per active tablespace
diff --git a/src/collectors/python.d.plugin/oracledb/oracledb.chart.py b/src/collectors/python.d.plugin/oracledb/oracledb.chart.py
deleted file mode 100644
index 455cf270e..000000000
--- a/src/collectors/python.d.plugin/oracledb/oracledb.chart.py
+++ /dev/null
@@ -1,846 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: oracledb netdata python.d module
-# Author: ilyam8 (Ilya Mashchenko)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from copy import deepcopy
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-try:
- import oracledb as cx_Oracle
-
- HAS_ORACLE_NEW = True
- HAS_ORACLE_OLD = False
-except ImportError:
- HAS_ORACLE_NEW = False
- try:
- import cx_Oracle
-
- HAS_ORACLE_OLD = True
- except ImportError:
- HAS_ORACLE_OLD = False
-
-ORDER = [
- 'session_count',
- 'session_limit_usage',
- 'logons',
- 'physical_disk_read_write',
- 'sorts_on_disk',
- 'full_table_scans',
- 'database_wait_time_ratio',
- 'shared_pool_free_memory',
- 'in_memory_sorts_ratio',
- 'sql_service_response_time',
- 'user_rollbacks',
- 'enqueue_timeouts',
- 'cache_hit_ratio',
- 'global_cache_blocks',
- 'activity',
- 'wait_time',
- 'tablespace_size',
- 'tablespace_usage',
- 'tablespace_usage_in_percent',
- 'allocated_size',
- 'allocated_usage',
- 'allocated_usage_in_percent',
-]
-
-CHARTS = {
- 'session_count': {
- 'options': [None, 'Session Count', 'sessions', 'session activity', 'oracledb.session_count', 'line'],
- 'lines': [
- ['session_count', 'total', 'absolute', 1, 1000],
- ['average_active_sessions', 'active', 'absolute', 1, 1000],
- ]
- },
- 'session_limit_usage': {
- 'options': [None, 'Session Limit Usage', '%', 'session activity', 'oracledb.session_limit_usage', 'area'],
- 'lines': [
- ['session_limit_percent', 'usage', 'absolute', 1, 1000],
- ]
- },
- 'logons': {
- 'options': [None, 'Logons', 'events/s', 'session activity', 'oracledb.logons', 'area'],
- 'lines': [
- ['logons_per_sec', 'logons', 'absolute', 1, 1000],
- ]
- },
- 'physical_disk_read_write': {
- 'options': [None, 'Physical Disk Reads/Writes', 'events/s', 'disk activity',
- 'oracledb.physical_disk_read_writes', 'area'],
- 'lines': [
- ['physical_reads_per_sec', 'reads', 'absolute', 1, 1000],
- ['physical_writes_per_sec', 'writes', 'absolute', -1, 1000],
- ]
- },
- 'sorts_on_disk': {
- 'options': [None, 'Sorts On Disk', 'events/s', 'disk activity', 'oracledb.sorts_on_disks', 'line'],
- 'lines': [
- ['disk_sort_per_sec', 'sorts', 'absolute', 1, 1000],
- ]
- },
- 'full_table_scans': {
- 'options': [None, 'Full Table Scans', 'events/s', 'disk activity', 'oracledb.full_table_scans', 'line'],
- 'lines': [
- ['long_table_scans_per_sec', 'full table scans', 'absolute', 1, 1000],
- ]
- },
- 'database_wait_time_ratio': {
- 'options': [None, 'Database Wait Time Ratio', '%', 'database and buffer activity',
- 'oracledb.database_wait_time_ratio', 'line'],
- 'lines': [
- ['database_wait_time_ratio', 'wait time ratio', 'absolute', 1, 1000],
- ]
- },
- 'shared_pool_free_memory': {
- 'options': [None, 'Shared Pool Free Memory', '%', 'database and buffer activity',
- 'oracledb.shared_pool_free_memory', 'line'],
- 'lines': [
- ['shared_pool_free_percent', 'free memory', 'absolute', 1, 1000],
- ]
- },
- 'in_memory_sorts_ratio': {
- 'options': [None, 'In-Memory Sorts Ratio', '%', 'database and buffer activity',
- 'oracledb.in_memory_sorts_ratio', 'line'],
- 'lines': [
- ['memory_sorts_ratio', 'in-memory sorts', 'absolute', 1, 1000],
- ]
- },
- 'sql_service_response_time': {
- 'options': [None, 'SQL Service Response Time', 'seconds', 'database and buffer activity',
- 'oracledb.sql_service_response_time', 'line'],
- 'lines': [
- ['sql_service_response_time', 'time', 'absolute', 1, 1000],
- ]
- },
- 'user_rollbacks': {
- 'options': [None, 'User Rollbacks', 'events/s', 'database and buffer activity',
- 'oracledb.user_rollbacks', 'line'],
- 'lines': [
- ['user_rollbacks_per_sec', 'rollbacks', 'absolute', 1, 1000],
- ]
- },
- 'enqueue_timeouts': {
- 'options': [None, 'Enqueue Timeouts', 'events/s', 'database and buffer activity',
- 'oracledb.enqueue_timeouts', 'line'],
- 'lines': [
- ['enqueue_timeouts_per_sec', 'enqueue timeouts', 'absolute', 1, 1000],
- ]
- },
- 'cache_hit_ratio': {
- 'options': [None, 'Cache Hit Ratio', '%', 'cache', 'oracledb.cache_hit_ration', 'stacked'],
- 'lines': [
- ['buffer_cache_hit_ratio', 'buffer', 'absolute', 1, 1000],
- ['cursor_cache_hit_ratio', 'cursor', 'absolute', 1, 1000],
- ['library_cache_hit_ratio', 'library', 'absolute', 1, 1000],
- ['row_cache_hit_ratio', 'row', 'absolute', 1, 1000],
- ]
- },
- 'global_cache_blocks': {
- 'options': [None, 'Global Cache Blocks Events', 'events/s', 'cache', 'oracledb.global_cache_blocks', 'area'],
- 'lines': [
- ['global_cache_blocks_corrupted', 'corrupted', 'incremental', 1, 1000],
- ['global_cache_blocks_lost', 'lost', 'incremental', 1, 1000],
- ]
- },
- 'activity': {
- 'options': [None, 'Activities', 'events/s', 'activities', 'oracledb.activity', 'stacked'],
- 'lines': [
- ['activity_parse_count_total', 'parse count', 'incremental', 1, 1000],
- ['activity_execute_count', 'execute count', 'incremental', 1, 1000],
- ['activity_user_commits', 'user commits', 'incremental', 1, 1000],
- ['activity_user_rollbacks', 'user rollbacks', 'incremental', 1, 1000],
- ]
- },
- 'wait_time': {
- 'options': [None, 'Wait Time', 'ms', 'wait time', 'oracledb.wait_time', 'stacked'],
- 'lines': [
- ['wait_time_application', 'application', 'absolute', 1, 1000],
- ['wait_time_configuration', 'configuration', 'absolute', 1, 1000],
- ['wait_time_administrative', 'administrative', 'absolute', 1, 1000],
- ['wait_time_concurrency', 'concurrency', 'absolute', 1, 1000],
- ['wait_time_commit', 'commit', 'absolute', 1, 1000],
- ['wait_time_network', 'network', 'absolute', 1, 1000],
- ['wait_time_user_io', 'user I/O', 'absolute', 1, 1000],
- ['wait_time_system_io', 'system I/O', 'absolute', 1, 1000],
- ['wait_time_scheduler', 'scheduler', 'absolute', 1, 1000],
- ['wait_time_other', 'other', 'absolute', 1, 1000],
- ]
- },
- 'tablespace_size': {
- 'options': [None, 'Size', 'KiB', 'tablespace', 'oracledb.tablespace_size', 'line'],
- 'lines': [],
- },
- 'tablespace_usage': {
- 'options': [None, 'Usage', 'KiB', 'tablespace', 'oracledb.tablespace_usage', 'line'],
- 'lines': [],
- },
- 'tablespace_usage_in_percent': {
- 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.tablespace_usage_in_percent', 'line'],
- 'lines': [],
- },
- 'allocated_size': {
- 'options': [None, 'Size', 'B', 'tablespace', 'oracledb.allocated_size', 'line'],
- 'lines': [],
- },
- 'allocated_usage': {
- 'options': [None, 'Usage', 'B', 'tablespace', 'oracledb.allocated_usage', 'line'],
- 'lines': [],
- },
- 'allocated_usage_in_percent': {
- 'options': [None, 'Usage', '%', 'tablespace', 'oracledb.allocated_usage_in_percent', 'line'],
- 'lines': [],
- },
-}
-
-CX_CONNECT_STRING_OLD = "{0}/{1}@//{2}/{3}"
-
-QUERY_SYSTEM = '''
-SELECT
- metric_name,
- value
-FROM
- gv$sysmetric
-ORDER BY
- begin_time
-'''
-QUERY_TABLESPACE = '''
-SELECT
- m.tablespace_name,
- m.used_space * t.block_size AS used_bytes,
- m.tablespace_size * t.block_size AS max_bytes,
- m.used_percent
-FROM
- dba_tablespace_usage_metrics m
- JOIN dba_tablespaces t ON m.tablespace_name = t.tablespace_name
-'''
-QUERY_ALLOCATED = '''
-SELECT
- nvl(b.tablespace_name,nvl(a.tablespace_name,'UNKNOWN')) tablespace_name,
- bytes_alloc used_bytes,
- bytes_alloc-nvl(bytes_free,0) max_bytes,
- ((bytes_alloc-nvl(bytes_free,0))/ bytes_alloc)*100 used_percent
-FROM
- (SELECT
- sum(bytes) bytes_free,
- tablespace_name
- FROM sys.dba_free_space
- GROUP BY tablespace_name
- ) a,
- (SELECT
- sum(bytes) bytes_alloc,
- tablespace_name
- FROM sys.dba_data_files
- GROUP BY tablespace_name
- ) b
-WHERE a.tablespace_name (+) = b.tablespace_name
-'''
-QUERY_ACTIVITIES_COUNT = '''
-SELECT
- name,
- value
-FROM
- v$sysstat
-WHERE
- name IN (
- 'parse count (total)',
- 'execute count',
- 'user commits',
- 'user rollbacks'
- )
-'''
-QUERY_WAIT_TIME = '''
-SELECT
- n.wait_class,
- round(m.time_waited / m.INTSIZE_CSEC, 3)
-FROM
- v$waitclassmetric m,
- v$system_wait_class n
-WHERE
- m.wait_class_id = n.wait_class_id
- AND n.wait_class != 'Idle'
-'''
-# QUERY_SESSION_COUNT = '''
-# SELECT
-# status,
-# type
-# FROM
-# v$session
-# GROUP BY
-# status,
-# type
-# '''
-# QUERY_PROCESSES_COUNT = '''
-# SELECT
-# COUNT(*)
-# FROM
-# v$process
-# '''
-# QUERY_PROCESS = '''
-# SELECT
-# program,
-# pga_used_mem,
-# pga_alloc_mem,
-# pga_freeable_mem,
-# pga_max_mem
-# FROM
-# gv$process
-# '''
-
-# PROCESS_METRICS = [
-# 'pga_used_memory',
-# 'pga_allocated_memory',
-# 'pga_freeable_memory',
-# 'pga_maximum_memory',
-# ]
-
-
-SYS_METRICS = {
- 'Average Active Sessions': 'average_active_sessions',
- 'Session Count': 'session_count',
- 'Session Limit %': 'session_limit_percent',
- 'Logons Per Sec': 'logons_per_sec',
- 'Physical Reads Per Sec': 'physical_reads_per_sec',
- 'Physical Writes Per Sec': 'physical_writes_per_sec',
- 'Disk Sort Per Sec': 'disk_sort_per_sec',
- 'Long Table Scans Per Sec': 'long_table_scans_per_sec',
- 'Database Wait Time Ratio': 'database_wait_time_ratio',
- 'Shared Pool Free %': 'shared_pool_free_percent',
- 'Memory Sorts Ratio': 'memory_sorts_ratio',
- 'SQL Service Response Time': 'sql_service_response_time',
- 'User Rollbacks Per Sec': 'user_rollbacks_per_sec',
- 'Enqueue Timeouts Per Sec': 'enqueue_timeouts_per_sec',
- 'Buffer Cache Hit Ratio': 'buffer_cache_hit_ratio',
- 'Cursor Cache Hit Ratio': 'cursor_cache_hit_ratio',
- 'Library Cache Hit Ratio': 'library_cache_hit_ratio',
- 'Row Cache Hit Ratio': 'row_cache_hit_ratio',
- 'Global Cache Blocks Corrupted': 'global_cache_blocks_corrupted',
- 'Global Cache Blocks Lost': 'global_cache_blocks_lost',
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self.user = configuration.get('user')
- self.password = configuration.get('password')
- self.server = configuration.get('server')
- self.service = configuration.get('service')
- self.protocol = configuration.get('protocol', 'tcps')
- self.alive = False
- self.conn = None
- self.active_tablespaces = set()
-
- def connect(self):
- if self.conn:
- self.conn.close()
- self.conn = None
- if HAS_ORACLE_NEW:
- try:
- self.conn = cx_Oracle.connect(
- f'{self.user}/{self.password}@{self.protocol}://{self.server}/{self.service}')
- except cx_Oracle.DatabaseError as error:
- self.error(error)
- return False
- else:
- try:
- self.conn = cx_Oracle.connect(
- CX_CONNECT_STRING_OLD.format(
- self.user,
- self.password,
- self.server,
- self.service,
- ))
- except cx_Oracle.DatabaseError as error:
- self.error(error)
- return False
-
- self.alive = True
- return True
-
- def reconnect(self):
- return self.connect()
-
- def check(self):
- if not HAS_ORACLE_NEW and not HAS_ORACLE_OLD:
- self.error("'oracledb' package is needed to use oracledb module")
- return False
-
- if not all([
- self.user,
- self.password,
- self.server,
- self.service
- ]):
- self.error("one of these parameters is not specified: user, password, server, service")
- return False
-
- if not self.connect():
- return False
-
- return bool(self.get_data())
-
- def get_data(self):
- if not self.alive and not self.reconnect():
- return None
-
- data = dict()
-
- # SYSTEM
- try:
- rv = self.gather_system_metrics()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, value in rv:
- if name not in SYS_METRICS:
- continue
- data[SYS_METRICS[name]] = int(float(value) * 1000)
-
- # ACTIVITIES COUNT
- try:
- rv = self.gather_activities_count()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, amount in rv:
- cleaned = name.replace(' ', '_').replace('(', '').replace(')', '')
- new_name = 'activity_{0}'.format(cleaned)
- data[new_name] = int(float(amount) * 1000)
-
- # WAIT TIME
- try:
- rv = self.gather_wait_time_metrics()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, amount in rv:
- cleaned = name.replace(' ', '_').replace('/', '').lower()
- new_name = 'wait_time_{0}'.format(cleaned)
- data[new_name] = amount
-
- # TABLESPACE
- try:
- rv = self.gather_tablespace_metrics()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, offline, size, used, used_in_percent in rv:
- # TODO: skip offline?
- if not (not offline and self.charts):
- continue
- # TODO: remove inactive?
- if name not in self.active_tablespaces:
- self.active_tablespaces.add(name)
- self.add_tablespace_to_charts(name)
- data['{0}_tablespace_size'.format(name)] = int(size * 1000)
- data['{0}_tablespace_used'.format(name)] = int(used * 1000)
- data['{0}_tablespace_used_in_percent'.format(name)] = int(used_in_percent * 1000)
-
- # ALLOCATED SPACE
- try:
- rv = self.gather_allocated_metrics()
- except cx_Oracle.Error as error:
- self.error(error)
- self.alive = False
- return None
- else:
- for name, offline, size, used, used_in_percent in rv:
- # TODO: skip offline?
- if not (not offline and self.charts):
- continue
- # TODO: remove inactive?
- if name not in self.active_tablespaces:
- self.active_tablespaces.add(name)
- self.add_tablespace_to_charts(name)
- data['{0}_allocated_size'.format(name)] = int(size * 1000)
- data['{0}_allocated_used'.format(name)] = int(used * 1000)
- data['{0}_allocated_used_in_percent'.format(name)] = int(used_in_percent * 1000)
-
- return data or None
-
- def gather_system_metrics(self):
-
- """
- :return:
-
- [['Buffer Cache Hit Ratio', 100],
- ['Memory Sorts Ratio', 100],
- ['Redo Allocation Hit Ratio', 100],
- ['User Transaction Per Sec', 0],
- ['Physical Reads Per Sec', 0],
- ['Physical Reads Per Txn', 0],
- ['Physical Writes Per Sec', 0],
- ['Physical Writes Per Txn', 0],
- ['Physical Reads Direct Per Sec', 0],
- ['Physical Reads Direct Per Txn', 0],
- ['Physical Writes Direct Per Sec', 0],
- ['Physical Writes Direct Per Txn', 0],
- ['Physical Reads Direct Lobs Per Sec', 0],
- ['Physical Reads Direct Lobs Per Txn', 0],
- ['Physical Writes Direct Lobs Per Sec', 0],
- ['Physical Writes Direct Lobs Per Txn', 0],
- ['Redo Generated Per Sec', Decimal('4.66666666666667')],
- ['Redo Generated Per Txn', 280],
- ['Logons Per Sec', Decimal('0.0166666666666667')],
- ['Logons Per Txn', 1],
- ['Open Cursors Per Sec', 0.35],
- ['Open Cursors Per Txn', 21],
- ['User Commits Per Sec', 0],
- ['User Commits Percentage', 0],
- ['User Rollbacks Per Sec', 0],
- ['User Rollbacks Percentage', 0],
- ['User Calls Per Sec', Decimal('0.0333333333333333')],
- ['User Calls Per Txn', 2],
- ['Recursive Calls Per Sec', 14.15],
- ['Recursive Calls Per Txn', 849],
- ['Logical Reads Per Sec', Decimal('0.683333333333333')],
- ['Logical Reads Per Txn', 41],
- ['DBWR Checkpoints Per Sec', 0],
- ['Background Checkpoints Per Sec', 0],
- ['Redo Writes Per Sec', Decimal('0.0333333333333333')],
- ['Redo Writes Per Txn', 2],
- ['Long Table Scans Per Sec', 0],
- ['Long Table Scans Per Txn', 0],
- ['Total Table Scans Per Sec', Decimal('0.0166666666666667')],
- ['Total Table Scans Per Txn', 1],
- ['Full Index Scans Per Sec', 0],
- ['Full Index Scans Per Txn', 0],
- ['Total Index Scans Per Sec', Decimal('0.216666666666667')],
- ['Total Index Scans Per Txn', 13],
- ['Total Parse Count Per Sec', 0.35],
- ['Total Parse Count Per Txn', 21],
- ['Hard Parse Count Per Sec', 0],
- ['Hard Parse Count Per Txn', 0],
- ['Parse Failure Count Per Sec', 0],
- ['Parse Failure Count Per Txn', 0],
- ['Cursor Cache Hit Ratio', Decimal('52.3809523809524')],
- ['Disk Sort Per Sec', 0],
- ['Disk Sort Per Txn', 0],
- ['Rows Per Sort', 8.6],
- ['Execute Without Parse Ratio', Decimal('27.5862068965517')],
- ['Soft Parse Ratio', 100],
- ['User Calls Ratio', Decimal('0.235017626321974')],
- ['Host CPU Utilization (%)', Decimal('0.124311845142959')],
- ['Network Traffic Volume Per Sec', 0],
- ['Enqueue Timeouts Per Sec', 0],
- ['Enqueue Timeouts Per Txn', 0],
- ['Enqueue Waits Per Sec', 0],
- ['Enqueue Waits Per Txn', 0],
- ['Enqueue Deadlocks Per Sec', 0],
- ['Enqueue Deadlocks Per Txn', 0],
- ['Enqueue Requests Per Sec', Decimal('216.683333333333')],
- ['Enqueue Requests Per Txn', 13001],
- ['DB Block Gets Per Sec', 0],
- ['DB Block Gets Per Txn', 0],
- ['Consistent Read Gets Per Sec', Decimal('0.683333333333333')],
- ['Consistent Read Gets Per Txn', 41],
- ['DB Block Changes Per Sec', 0],
- ['DB Block Changes Per Txn', 0],
- ['Consistent Read Changes Per Sec', 0],
- ['Consistent Read Changes Per Txn', 0],
- ['CPU Usage Per Sec', 0],
- ['CPU Usage Per Txn', 0],
- ['CR Blocks Created Per Sec', 0],
- ['CR Blocks Created Per Txn', 0],
- ['CR Undo Records Applied Per Sec', 0],
- ['CR Undo Records Applied Per Txn', 0],
- ['User Rollback UndoRec Applied Per Sec', 0],
- ['User Rollback Undo Records Applied Per Txn', 0],
- ['Leaf Node Splits Per Sec', 0],
- ['Leaf Node Splits Per Txn', 0],
- ['Branch Node Splits Per Sec', 0],
- ['Branch Node Splits Per Txn', 0],
- ['PX downgraded 1 to 25% Per Sec', 0],
- ['PX downgraded 25 to 50% Per Sec', 0],
- ['PX downgraded 50 to 75% Per Sec', 0],
- ['PX downgraded 75 to 99% Per Sec', 0],
- ['PX downgraded to serial Per Sec', 0],
- ['Physical Read Total IO Requests Per Sec', Decimal('2.16666666666667')],
- ['Physical Read Total Bytes Per Sec', Decimal('35498.6666666667')],
- ['GC CR Block Received Per Second', 0],
- ['GC CR Block Received Per Txn', 0],
- ['GC Current Block Received Per Second', 0],
- ['GC Current Block Received Per Txn', 0],
- ['Global Cache Average CR Get Time', 0],
- ['Global Cache Average Current Get Time', 0],
- ['Physical Write Total IO Requests Per Sec', Decimal('0.966666666666667')],
- ['Global Cache Blocks Corrupted', 0],
- ['Global Cache Blocks Lost', 0],
- ['Current Logons Count', 49],
- ['Current Open Cursors Count', 64],
- ['User Limit %', Decimal('0.00000114087015416959')],
- ['SQL Service Response Time', 0],
- ['Database Wait Time Ratio', 0],
- ['Database CPU Time Ratio', 0],
- ['Response Time Per Txn', 0],
- ['Row Cache Hit Ratio', 100],
- ['Row Cache Miss Ratio', 0],
- ['Library Cache Hit Ratio', 100],
- ['Library Cache Miss Ratio', 0],
- ['Shared Pool Free %', Decimal('7.82380268491548')],
- ['PGA Cache Hit %', Decimal('98.0399767109115')],
- ['Process Limit %', Decimal('17.6666666666667')],
- ['Session Limit %', Decimal('15.2542372881356')],
- ['Executions Per Txn', 29],
- ['Executions Per Sec', Decimal('0.483333333333333')],
- ['Txns Per Logon', 0],
- ['Database Time Per Sec', 0],
- ['Physical Write Total Bytes Per Sec', 15308.8],
- ['Physical Read IO Requests Per Sec', 0],
- ['Physical Read Bytes Per Sec', 0],
- ['Physical Write IO Requests Per Sec', 0],
- ['Physical Write Bytes Per Sec', 0],
- ['DB Block Changes Per User Call', 0],
- ['DB Block Gets Per User Call', 0],
- ['Executions Per User Call', 14.5],
- ['Logical Reads Per User Call', 20.5],
- ['Total Sorts Per User Call', 2.5],
- ['Total Table Scans Per User Call', 0.5],
- ['Current OS Load', 0.0390625],
- ['Streams Pool Usage Percentage', 0],
- ['PQ QC Session Count', 0],
- ['PQ Slave Session Count', 0],
- ['Queries parallelized Per Sec', 0],
- ['DML statements parallelized Per Sec', 0],
- ['DDL statements parallelized Per Sec', 0],
- ['PX operations not downgraded Per Sec', 0],
- ['Session Count', 72],
- ['Average Synchronous Single-Block Read Latency', 0],
- ['I/O Megabytes per Second', 0.05],
- ['I/O Requests per Second', Decimal('3.13333333333333')],
- ['Average Active Sessions', 0],
- ['Active Serial Sessions', 1],
- ['Active Parallel Sessions', 0],
- ['Captured user calls', 0],
- ['Replayed user calls', 0],
- ['Workload Capture and Replay status', 0],
- ['Background CPU Usage Per Sec', Decimal('1.22578833333333')],
- ['Background Time Per Sec', 0.0147551],
- ['Host CPU Usage Per Sec', Decimal('0.116666666666667')],
- ['Cell Physical IO Interconnect Bytes', 3048448],
- ['Temp Space Used', 0],
- ['Total PGA Allocated', 200657920],
- ['Total PGA Used by SQL Workareas', 0],
- ['Run Queue Per Sec', 0],
- ['VM in bytes Per Sec', 0],
- ['VM out bytes Per Sec', 0]]
- """
-
- metrics = list()
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_SYSTEM)
- for metric_name, value in cursor.fetchall():
- metrics.append([metric_name, value])
- return metrics
-
- def gather_tablespace_metrics(self):
- """
- :return:
-
- [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
- ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
- ['TEMP', 0.0, 3233177600.0, 0.0, 0],
- ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
- """
- metrics = list()
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_TABLESPACE)
- for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
- if used_bytes is None:
- offline = True
- used = 0
- else:
- offline = False
- used = float(used_bytes)
- if max_bytes is None:
- size = 0
- else:
- size = float(max_bytes)
- if used_percent is None:
- used_percent = 0
- else:
- used_percent = float(used_percent)
- metrics.append(
- [
- tablespace_name,
- offline,
- size,
- used,
- used_percent,
- ]
- )
- return metrics
-
- def gather_allocated_metrics(self):
- """
- :return:
-
- [['SYSTEM', 874250240.0, 3233169408.0, 27.040038107400033, 0],
- ['SYSAUX', 498860032.0, 3233169408.0, 15.429443033997678, 0],
- ['TEMP', 0.0, 3233177600.0, 0.0, 0],
- ['USERS', 1048576.0, 3233169408.0, 0.03243182981397305, 0]]
- """
- metrics = list()
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_ALLOCATED)
- for tablespace_name, used_bytes, max_bytes, used_percent in cursor.fetchall():
- if used_bytes is None:
- offline = True
- used = 0
- else:
- offline = False
- used = float(used_bytes)
- if max_bytes is None:
- size = 0
- else:
- size = float(max_bytes)
- if used_percent is None:
- used_percent = 0
- else:
- used_percent = float(used_percent)
- metrics.append(
- [
- tablespace_name,
- offline,
- size,
- used,
- used_percent,
- ]
- )
- return metrics
-
- def gather_wait_time_metrics(self):
- """
- :return:
-
- [['Other', 0],
- ['Application', 0],
- ['Configuration', 0],
- ['Administrative', 0],
- ['Concurrency', 0],
- ['Commit', 0],
- ['Network', 0],
- ['User I/O', 0],
- ['System I/O', 0.002],
- ['Scheduler', 0]]
- """
- metrics = list()
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_WAIT_TIME)
- for wait_class_name, value in cursor.fetchall():
- metrics.append([wait_class_name, value])
- return metrics
-
- def gather_activities_count(self):
- """
- :return:
-
- [('user commits', 9104),
- ('user rollbacks', 17),
- ('parse count (total)', 483695),
- ('execute count', 2020356)]
- """
- with self.conn.cursor() as cursor:
- cursor.execute(QUERY_ACTIVITIES_COUNT)
- return cursor.fetchall()
-
- # def gather_process_metrics(self):
- # """
- # :return:
- #
- # [['PSEUDO', 'pga_used_memory', 0],
- # ['PSEUDO', 'pga_allocated_memory', 0],
- # ['PSEUDO', 'pga_freeable_memory', 0],
- # ['PSEUDO', 'pga_maximum_memory', 0],
- # ['oracle@localhost.localdomain (PMON)', 'pga_used_memory', 1793827],
- # ['oracle@localhost.localdomain (PMON)', 'pga_allocated_memory', 1888651],
- # ['oracle@localhost.localdomain (PMON)', 'pga_freeable_memory', 0],
- # ['oracle@localhost.localdomain (PMON)', 'pga_maximum_memory', 1888651],
- # ...
- # ...
- # """
- #
- # metrics = list()
- # with self.conn.cursor() as cursor:
- # cursor.execute(QUERY_PROCESS)
- # for row in cursor.fetchall():
- # for i, name in enumerate(PROCESS_METRICS, 1):
- # metrics.append([row[0], name, row[i]])
- # return metrics
-
- # def gather_processes_count(self):
- # with self.conn.cursor() as cursor:
- # cursor.execute(QUERY_PROCESSES_COUNT)
- # return cursor.fetchone()[0] # 53
-
- # def gather_sessions_count(self):
- # with self.conn.cursor() as cursor:
- # cursor.execute(QUERY_SESSION_COUNT)
- # total, active, inactive = 0, 0, 0
- # for status, _ in cursor.fetchall():
- # total += 1
- # active += status == 'ACTIVE'
- # inactive += status == 'INACTIVE'
- # return [total, active, inactive]
-
- def add_tablespace_to_charts(self, name):
- self.charts['tablespace_size'].add_dimension(
- [
- '{0}_tablespace_size'.format(name),
- name,
- 'absolute',
- 1,
- 1024 * 1000,
- ])
- self.charts['tablespace_usage'].add_dimension(
- [
- '{0}_tablespace_used'.format(name),
- name,
- 'absolute',
- 1,
- 1024 * 1000,
- ])
- self.charts['tablespace_usage_in_percent'].add_dimension(
- [
- '{0}_tablespace_used_in_percent'.format(name),
- name,
- 'absolute',
- 1,
- 1000,
- ])
- self.charts['allocated_size'].add_dimension(
- [
- '{0}_allocated_size'.format(name),
- name,
- 'absolute',
- 1,
- 1000,
- ])
- self.charts['allocated_usage'].add_dimension(
- [
- '{0}_allocated_used'.format(name),
- name,
- 'absolute',
- 1,
- 1000,
- ])
- self.charts['allocated_usage_in_percent'].add_dimension(
- [
- '{0}_allocated_used_in_percent'.format(name),
- name,
- 'absolute',
- 1,
- 1000,
- ])
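
Note: the following sketch is illustrative and not part of the deleted file. Based on the removed module above, the collector preferred the newer `oracledb` driver (imported under the `cx_Oracle` name) with an EZConnect-style DSN, fell back to the legacy `cx_Oracle` package with a `user/password@//server/service` connect string, and stored every sampled value as an integer multiplied by 1000 so chart dimensions declared with a divisor of 1000 keep three decimal places.

```python
# Minimal sketch of the removed collector's connection and scaling conventions.
# Not part of the diff; parameter names mirror the job options (user, password,
# server, service, protocol).
def connect_oracle(user, password, server, service, protocol="tcps"):
    try:
        import oracledb as driver  # newer driver, EZConnect-style DSN
        dsn = f"{user}/{password}@{protocol}://{server}/{service}"
    except ImportError:
        import cx_Oracle as driver  # legacy driver and connect string
        dsn = f"{user}/{password}@//{server}/{service}"
    return driver.connect(dsn)

def to_fixed_point(value):
    # python.d dimensions carry integers; multiplying by 1000 here and using a
    # divisor of 1000 in the chart definition preserves three decimal places.
    return int(float(value) * 1000)
```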
diff --git a/src/collectors/python.d.plugin/oracledb/oracledb.conf b/src/collectors/python.d.plugin/oracledb/oracledb.conf
deleted file mode 100644
index 027215dad..000000000
--- a/src/collectors/python.d.plugin/oracledb/oracledb.conf
+++ /dev/null
@@ -1,88 +0,0 @@
-# netdata python.d.plugin configuration for oracledb
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, oracledb also supports the following:
-#
-# user: username # the username for the user account. Required.
-# password: password # the password for the user account. Required.
-# server: localhost:1521 # the IP address or hostname (and port) of the Oracle Database Server. Required.
-# service: XE # the Oracle Database service name. Required. To view the services available on your server,
-# run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`.
-# protocol: tcp/tcps # one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic
-# or encrypted network traffic
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-#local:
-# user: 'netdata'
-# password: 'secret'
-# server: 'localhost:1521'
-# service: 'XE'
-# protocol: 'tcps'
-
-#remote:
-# user: 'netdata'
-# password: 'secret'
-# server: '10.0.0.1:1521'
-# service: 'XE'
-# protocol: 'tcps'
diff --git a/src/collectors/python.d.plugin/pandas/integrations/pandas.md b/src/collectors/python.d.plugin/pandas/integrations/pandas.md
index e0b5418c5..b36bea073 100644
--- a/src/collectors/python.d.plugin/pandas/integrations/pandas.md
+++ b/src/collectors/python.d.plugin/pandas/integrations/pandas.md
@@ -108,8 +108,8 @@ sudo pip install 'sqlalchemy<2.0' psycopg2-binary
The configuration file name for this integration is `python.d/pandas.conf`.
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -136,8 +136,8 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| chart_configs | an array of chart configuration dictionaries | [] | yes |
| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |
| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |
-| chart_configs.family | [family](/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |
-| chart_configs.context | [context](/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/dashboards-and-charts/netdata-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/dashboards-and-charts/netdata-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |
| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |
| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |
| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |
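
The `df_steps` option above is a newline-separated series of pandas operations, each returning a DataFrame. A rough illustration of the kind of pipeline such an entry describes, written as plain Python (the URL and column names below are hypothetical, not taken from the integration):

```python
# Illustrative only: the style of pandas pipeline a df_steps entry expresses.
import pandas as pd

df = pd.read_csv("https://example.com/weather.csv")  # fetch the raw data
df = df[["temperature", "humidity"]]                  # keep the numeric columns
df = df.mean().to_frame().T                           # collapse to one row of averages
print(df)
```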
diff --git a/src/collectors/python.d.plugin/python.d.conf b/src/collectors/python.d.plugin/python.d.conf
index 4fcecc75d..e2ce1347e 100644
--- a/src/collectors/python.d.plugin/python.d.conf
+++ b/src/collectors/python.d.plugin/python.d.conf
@@ -26,30 +26,23 @@ gc_run: yes
gc_interval: 300
# am2320: yes
-# anomalies: no
-# boinc: yes
-# ceph: yes
# this is just an example
go_expvar: no
# haproxy: yes
-# openldap: yes
-# oracledb: yes
# pandas: yes
# retroshare: yes
-# samba: yes
# smartd_log: yes
-# spigotmc: yes
# traefik: yes
# varnish: yes
-# w1sensor: yes
-# zscores: no
## Disabled for existing installations.
adaptec_raid: no # Removed (replaced with go.d/adaptercraid).
apache: no # Removed (replaced with go.d/apache).
beanstalk: no # Removed (replaced with go.d/beanstalk).
+boinc: no # Removed (replaced with go.d/boinc).
dovecot: no # Removed (replaced with go.d/dovecot).
+ceph: no # Removed (replaced with go.d/ceph).
elasticsearch: no # Removed (replaced with go.d/elasticsearch).
exim: no # Removed (replaced with go.d/exim).
fail2ban: no # Removed (replaced with go.d/fail2ban).
@@ -68,15 +61,21 @@ mysql: no # Removed (replaced with go.d/mysql).
nginx: no # Removed (replaced with go.d/nginx).
nsd: no # Removed (replaced with go.d/nsd).
nvidia_smi: no # Removed (replaced with go.d/nvidia_smi).
+openldap: no # Removed (replaced with go.d/openldap).
+oracledb: no # Removed (replaced with go.d/oracledb).
postfix: no # Removed (replaced with go.d/postfix).
postgres: no # Removed (replaced with go.d/postgres).
proxysql: no # Removed (replaced with go.d/proxysql).
redis: no # Removed (replaced with go.d/redis).
rethinkdbs: no # Removed (replaced with go.d/rethinkdb).
riakkv: no # Removed (replaced with go.d/riak).
+samba: no # Removed (replaced with go.d/samba).
sensors: no # Removed (replaced with go.d/sensors).
squid: no # Removed (replaced with go.d/squid).
+spigotmc: no # Removed (replaced with go.d/spigotmc).
tomcat: no # Removed (replaced with go.d/tomcat)
tor: no # Removed (replaced with go.d/tor).
puppet: no # Removed (replaced with go.d/puppet).
uwsgi: no # Removed (replaced with go.d/uwsgi).
+varnish: no # Removed (replaced with go.d/varnish).
+w1sensor: no # Removed (replaced with go.d/w1sensor)
diff --git a/src/collectors/python.d.plugin/python.d.plugin.in b/src/collectors/python.d.plugin/python.d.plugin.in
index 81e68f94c..089fb5a58 100644
--- a/src/collectors/python.d.plugin/python.d.plugin.in
+++ b/src/collectors/python.d.plugin/python.d.plugin.in
@@ -12,7 +12,8 @@ do
done
if [ "$pybinary" = "" ]
then
- echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM"
+ echo 1>&2 "python.d ERROR: python is not installed on this system"
+ echo "EXIT"
exit 1
fi
exec "$pybinary" "$0" "${filtered[@]}" # '''
diff --git a/src/collectors/python.d.plugin/python_modules/bases/loaders.py b/src/collectors/python.d.plugin/python_modules/bases/loaders.py
index 095f3a3b1..6ffa2189d 100644
--- a/src/collectors/python.d.plugin/python_modules/bases/loaders.py
+++ b/src/collectors/python.d.plugin/python_modules/bases/loaders.py
@@ -3,27 +3,17 @@
# Author: Ilya Mashchenko (ilyam8)
# SPDX-License-Identifier: GPL-3.0-or-later
-
-from sys import version_info
-
-PY_VERSION = version_info[:2]
-
try:
- if PY_VERSION > (3, 1):
- from pyyaml3 import SafeLoader as YamlSafeLoader
- else:
- from pyyaml2 import SafeLoader as YamlSafeLoader
+ from pyyaml3 import SafeLoader as YamlSafeLoader
except ImportError:
from yaml import SafeLoader as YamlSafeLoader
-
try:
from collections import OrderedDict
except ImportError:
from third_party.ordereddict import OrderedDict
-
-DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map' if PY_VERSION > (3, 1) else u'tag:yaml.org,2002:map'
+DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
def dict_constructor(loader, node):
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
deleted file mode 100644
index 4d560e438..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/__init__.py
+++ /dev/null
@@ -1,316 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-from error import *
-
-from tokens import *
-from events import *
-from nodes import *
-
-from loader import *
-from dumper import *
-
-__version__ = '3.11'
-
-try:
- from cyaml import *
- __with_libyaml__ = True
-except ImportError:
- __with_libyaml__ = False
-
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
-def load(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-def load_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
- loader = Loader(stream)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
-def safe_load(stream):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, SafeLoader)
-
-def safe_load_all(stream):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, SafeLoader)
-
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- from StringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding='utf-8', explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- from StringIO import StringIO
- else:
- from cStringIO import StringIO
- stream = StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
- Loader=Loader, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
- Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
- Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
-
-def add_constructor(tag, constructor, Loader=Loader):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
- Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
- Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
- cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(object):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
-
- __metaclass__ = YAMLObjectMetaclass
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
- yaml_loader = Loader
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
- from_yaml = classmethod(from_yaml)
-
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
- to_yaml = classmethod(to_yaml)
-
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/composer.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
deleted file mode 100644
index 6b41b8067..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/composer.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Composer', 'ComposerError']
-
-from error import MarkedYAMLError
-from events import *
-from nodes import *
-
-class ComposerError(MarkedYAMLError):
- pass
-
-class Composer(object):
-
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
- # If there are more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- anchor = event.anchor
- if anchor not in self.anchors:
- raise ComposerError(None, None, "found undefined alias %r"
- % anchor.encode('utf-8'), event.start_mark)
- return self.anchors[anchor]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None:
- if anchor in self.anchors:
- raise ComposerError("found duplicate anchor %r; first occurence"
- % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
- "second occurence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == u'!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- #key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- #if item_key in node.value:
- # raise ComposerError("while composing a mapping", start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- #node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
deleted file mode 100644
index 8ad1b90a7..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/constructor.py
+++ /dev/null
@@ -1,676 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
- 'ConstructorError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
- pass
-
-class BaseConstructor(object):
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
-
- def check_data(self):
- # If there are more documents available?
- return self.check_node()
-
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
- if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = generator.next()
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- try:
- hash(key)
- except TypeError, exc:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" % exc, key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- def add_constructor(cls, tag, constructor):
- if not 'yaml_constructors' in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
- add_constructor = classmethod(add_constructor)
-
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if not 'yaml_multi_constructors' in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
- add_multi_constructor = classmethod(add_multi_constructor)
-
-class SafeConstructor(BaseConstructor):
-
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == u'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return BaseConstructor.construct_scalar(self, node)
-
- def flatten_mapping(self, node):
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == u'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == u'tag:yaml.org,2002:value':
- key_node.tag = u'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
-
- def construct_mapping(self, node, deep=False):
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return BaseConstructor.construct_mapping(self, node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- bool_values = {
- u'yes': True,
- u'no': False,
- u'true': True,
- u'false': False,
- u'on': True,
- u'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value[0] == '0':
- return sign*int(value, 8)
- elif ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
-
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = str(self.construct_scalar(node))
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- def construct_yaml_binary(self, node):
- value = self.construct_scalar(node)
- try:
- return str(value).decode('base64')
- except (binascii.Error, UnicodeEncodeError), exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- ur'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node)
- match = self.timestamp_regexp.match(node.value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- delta = None
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second, fraction)
- if delta:
- data -= delta
- return data
-
- def construct_yaml_omap(self, node):
- # Note: we do not check for duplicate keys, because it's too
- # CPU-expensive.
- omap = []
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- omap.append((key, value))
-
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- value = self.construct_scalar(node)
- try:
- return value.encode('ascii')
- except UnicodeEncodeError:
- return value
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(None, None,
- "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
- node.start_mark)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
- SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
- def construct_python_str(self, node):
- return self.construct_scalar(node).encode('utf-8')
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- def construct_python_long(self, node):
- return long(self.construct_yaml_int(node))
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
- def find_python_module(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
- try:
- __import__(name)
- except ImportError, exc:
- raise ConstructorError("while constructing a Python module", mark,
- "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
- return sys.modules[name]
-
- def find_python_name(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if u'.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = '__builtin__'
- object_name = name
- try:
- __import__(module_name)
- except ImportError, exc:
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find %r in the module %r" % (object_name.encode('utf-8'),
- module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % value.encode('utf-8'),
- node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- class classobj: pass
-
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
- if newobj and isinstance(cls, type(self.classobj)) \
- and not args and not kwds:
- instance = self.classobj()
- instance.__class__ = cls
- return instance
- elif newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
- def set_python_instance_state(self, instance, state):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
-                setattr(instance, key, value)
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
- # is how an object is created, check make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
-
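For reference, a minimal sketch of how the add_constructor()/construct_mapping() machinery above is used through the public PyYAML API, which the bundled module mirrors. The !point tag and the Point class are made-up examples, not anything shipped with netdata:

    # Register a constructor for a custom tag on SafeLoader (illustrative only).
    import yaml

    class Point(object):
        def __init__(self, x, y):
            self.x = x
            self.y = y

    def construct_point(loader, node):
        mapping = loader.construct_mapping(node)   # resolve the mapping node to a dict
        return Point(mapping['x'], mapping['y'])

    yaml.SafeLoader.add_constructor(u'!point', construct_point)

    p = yaml.safe_load("!point {x: 1, y: 2}")
    print(p.x, p.y)   # -> 1 2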
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
deleted file mode 100644
index 2858ab479..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/cyaml.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
- 'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from constructor import *
-
-from serializer import *
-from representer import *
-
-from resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
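A short sketch of the usual fallback pattern these C-binding classes enable; whether the CSafe* variants exist at runtime depends on how PyYAML was built against libyaml:

    import yaml

    try:
        from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
    except ImportError:                 # _yaml / libyaml bindings not compiled in
        from yaml import SafeLoader, SafeDumper

    data = yaml.load("a: [1, 2, 3]", Loader=SafeLoader)
    print(yaml.dump(data, Dumper=SafeDumper))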
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
deleted file mode 100644
index 3685cbeeb..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/dumper.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from emitter import *
-from serializer import *
-from representer import *
-from resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
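The keyword arguments accepted by the Dumper constructors above (indent, width, default_flow_style, explicit_start, ...) are what yaml.dump() forwards. A minimal sketch with made-up sample data:

    import yaml

    doc = {'alarm': {'name': 'cpu_usage', 'warn': 75, 'crit': 90},
           'hosts': ['web1', 'web2']}

    print(yaml.dump(doc, Dumper=yaml.SafeDumper,
                    default_flow_style=False, indent=2, width=80,
                    explicit_start=True))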
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
deleted file mode 100644
index 9a460a0fd..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/emitter.py
+++ /dev/null
@@ -1,1141 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
-from error import YAMLError
-from events import *
-
-class EmitterError(YAMLError):
- pass
-
-class ScalarAnalysis(object):
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-class Emitter(object):
-
- DEFAULT_TAG_PREFIXES = {
- u'!' : u'!',
- u'tag:yaml.org,2002:' : u'!!',
- }
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
-        # Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
- # - is it a whitespace?
- # - is it an indention character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = u'\n'
- if line_break in [u'\r', u'\n', u'\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
- # In some cases, we wait for a few next events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
- def increase_indent(self, flow=False, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if self.event.encoding and not getattr(self.stream, 'encoding', None):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s"
- % self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = self.event.tags.keys()
- handles.sort()
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and not self.event.explicit and not self.canonical
- and not self.event.version and not self.event.tags
- and not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator(u'---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s"
- % self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator(u'...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s"
- % self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor(u'&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor(u'*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator(u'[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u']', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator(u'{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(u',', False)
- self.write_indent()
- self.write_indicator(u'}', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(u',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(u':', True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- self.write_indicator(u'-', True, indention=True)
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.check_simple_key():
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator(u'?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- self.write_indicator(u':', False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(u':', True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events
- and isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events
- and isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None
- and event.tag is None and event.implicit and event.value == u'')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < 128 and (isinstance(self.event, AliasEvent)
- or (isinstance(self.event, ScalarEvent)
- and not self.analysis.empty and not self.analysis.multiline)
- or self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0])
- or (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = u'!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if not self.event.style and self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline))
- and (self.flow_level and self.analysis.allow_flow_plain
- or (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context
- and self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- #if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
- return u'%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != u'!' or handle[-1] != u'!':
- raise EmitterError("tag handle must start and end with '!': %r"
- % (handle.encode('utf-8')))
- for ch in handle[1:-1]:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (ch.encode('utf-8'), handle.encode('utf-8')))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == u'!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(prefix[start:end])
- return u''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == u'!':
- return tag
- handle = None
- suffix = tag
- prefixes = self.tag_prefixes.keys()
- prefixes.sort()
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == u'!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.~*\'()[]' \
- or (ch == u'!' and handle != u'!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append(u'%%%02X' % ord(ch))
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = u''.join(chunks)
- if handle:
- return u'%s%s' % (handle, suffix_text)
- else:
- return u'!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (ch.encode('utf-8'), anchor.encode('utf-8')))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith(u'---') or scalar.startswith(u'...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
- preceeded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
- if ch in u'#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in u'?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
- # Some indicators cannot appear within a scalar as well.
- if ch in u',?[]{}':
- flow_indicators = True
- if ch == u':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == u'#' and preceeded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in u'\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
- if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
- or u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF':
- unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == u' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in u'\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
- preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (index+1 >= len(scalar) or
- scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
- # Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break
- or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
-        # Spaces followed by breaks, as well as special characters, are only
-        # allowed for double quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write(u'\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = u' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = u' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = u'%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = u'%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator(u'\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != u' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == u'\'':
- data = u'\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
- self.write_indicator(u'\'', False)
-
- ESCAPE_REPLACEMENTS = {
- u'\0': u'0',
- u'\x07': u'a',
- u'\x08': u'b',
- u'\x09': u't',
- u'\x0A': u'n',
- u'\x0B': u'v',
- u'\x0C': u'f',
- u'\x0D': u'r',
- u'\x1B': u'e',
- u'\"': u'\"',
- u'\\': u'\\',
- u'\x85': u'N',
- u'\xA0': u'_',
- u'\u2028': u'L',
- u'\u2029': u'P',
- }
-
- def write_double_quoted(self, text, split=True):
- self.write_indicator(u'"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
- or not (u'\x20' <= ch <= u'\x7E'
- or (self.allow_unicode
- and (u'\xA0' <= ch <= u'\uD7FF'
- or u'\uE000' <= ch <= u'\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= u'\xFF':
- data = u'\\x%02X' % ord(ch)
- elif ch <= u'\uFFFF':
- data = u'\\u%04X' % ord(ch)
- else:
- data = u'\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+u'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == u' ':
- data = u'\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator(u'"', False)
-
- def determine_block_hints(self, text):
- hints = u''
- if text:
- if text[0] in u' \n\x85\u2028\u2029':
- hints += unicode(self.best_indent)
- if text[-1] not in u'\n\x85\u2028\u2029':
- hints += u'-'
- elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
- hints += u'+'
- return hints
-
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'>'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != u' ' \
- and text[start] == u'\n':
- self.write_line_break()
- leading_space = (ch == u' ')
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- spaces = (ch == u' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator(u'|'+hints, True)
- if hints[-1:] == u'+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in u'\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in u'\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = u' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != u' ':
- if start+1 == end and self.column > self.best_width and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in u'\n\x85\u2028\u2029':
- if text[start] == u'\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == u'\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in u' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
- end += 1
-
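A rough sketch of driving this emitter directly with an event stream that follows the grammar in the header comment (STREAM-START document* STREAM-END); the one-key "answer: 42" document is a made-up example:

    import sys
    import yaml
    from yaml.events import (StreamStartEvent, StreamEndEvent,
                             DocumentStartEvent, DocumentEndEvent,
                             MappingStartEvent, MappingEndEvent, ScalarEvent)

    events = [
        StreamStartEvent(),
        DocumentStartEvent(explicit=True),
        MappingStartEvent(anchor=None, tag=None, implicit=True),
        ScalarEvent(anchor=None, tag=None, implicit=(True, True), value=u'answer'),
        ScalarEvent(anchor=None, tag=None, implicit=(True, True), value=u'42'),
        MappingEndEvent(),
        DocumentEndEvent(explicit=False),
        StreamEndEvent(),
    ]
    yaml.emit(events, sys.stdout)    # writes roughly: "---\nanswer: 42\n"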
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/error.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/error.py
deleted file mode 100644
index 5466be721..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/error.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark(object):
-
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = self.buffer[start:end].encode('utf-8')
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-class YAMLError(Exception):
- pass
-
-class MarkedYAMLError(YAMLError):
-
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None
- or self.context_mark.name != self.problem_mark.name
- or self.context_mark.line != self.problem_mark.line
- or self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
-
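The Mark/MarkedYAMLError machinery above is what produces the "line N, column M" snippets seen when a document fails to parse. A minimal sketch; the broken snippet is a made-up example:

    import yaml

    broken = "alarm:\n  warn: [1, 2\n"        # unclosed flow sequence
    try:
        yaml.safe_load(broken)
    except yaml.YAMLError as exc:
        print(exc)                            # context/problem marks plus snippet
        if hasattr(exc, 'problem_mark'):
            mark = exc.problem_mark
            print("error at line %d, column %d" % (mark.line + 1, mark.column + 1))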
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/events.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/events.py
deleted file mode 100644
index 283452add..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/events.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Abstract classes.
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None):
- self.anchor = anchor
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndEvent(Event):
- pass
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
-
-class AliasEvent(NodeEvent):
- pass
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-class MappingEndEvent(CollectionEndEvent):
- pass
-
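
These event classes model the output of the pull parser. Using the separately installed PyYAML package (not the bundled copy removed here), the event stream for a one-entry mapping looks like this:

import yaml

for event in yaml.parse('a: 1', Loader=yaml.SafeLoader):
    print(type(event).__name__)
# StreamStartEvent, DocumentStartEvent, MappingStartEvent,
# ScalarEvent (key 'a'), ScalarEvent (value '1'),
# MappingEndEvent, DocumentEndEvent, StreamEndEvent
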
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/loader.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
deleted file mode 100644
index 1c195531f..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/loader.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from reader import *
-from scanner import *
-from parser import *
-from composer import *
-from constructor import *
-from resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
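
The removed loader classes are plain mixin compositions: each processing stage (reader, scanner, parser, composer, constructor, resolver) is an independent class, and the concrete loader only chains their initialisers in pipeline order. A toy illustration of the same pattern (TinyLoader and the stub stages are invented for this sketch):

class Reader:
    def __init__(self, stream):
        self.stream = stream

class Scanner:
    def __init__(self):
        self.tokens = []

class Parser:
    def __init__(self):
        self.state = None

class TinyLoader(Reader, Scanner, Parser):
    # Mirror the removed classes: no cooperative super(), just explicit
    # per-stage initialisation in pipeline order.
    def __init__(self, stream):
        Reader.__init__(self, stream)
        Scanner.__init__(self)
        Parser.__init__(self)

loader = TinyLoader('a: 1')
print(loader.stream, loader.tokens, loader.state)  # a: 1 [] None
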
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
deleted file mode 100644
index ed2a1b43e..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/nodes.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- value = self.value
- #if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- #else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
- id = 'scalar'
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class CollectionNode(Node):
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, flow_style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-class MappingNode(CollectionNode):
- id = 'mapping'
-
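
These node classes describe the representation graph: mapping nodes hold (key node, value node) pairs, sequence nodes hold child nodes, and scalar nodes hold strings. With the external PyYAML package the same structure can be inspected directly:

import yaml

root = yaml.compose('a: [1, 2]', Loader=yaml.SafeLoader)
print(root.tag)                                    # tag:yaml.org,2002:map
key_node, value_node = root.value[0]
print(key_node.value, value_node.tag)              # a tag:yaml.org,2002:seq
print([item.value for item in value_node.value])   # ['1', '2']
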
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/parser.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
deleted file mode 100644
index 97ba08337..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/parser.py
+++ /dev/null
@@ -1,590 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content | indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from error import MarkedYAMLError
-from tokens import *
-from events import *
-from scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser(object):
- # Since writing a recursive-descendant parser is a straightforward task, we
- # do not give many comments here.
-
- DEFAULT_TAGS = {
- u'!': u'!',
- u'!!': u'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == u'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == u'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle.encode('utf-8'),
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle.encode('utf-8'),
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == u'!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == u'!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == u'!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), u'',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark)
- self.state = self.states.pop()
- return event
-
- # block_mapping ::= BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while production rules for both flow_sequence_entry and
- # flow_mapping_entry are equal, their interpretations are different.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
- # generate an inline mapping (set syntax).
-
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), u'', mark, mark)
-
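
The removed parser is a pull-style state machine: check_event/peek_event/get_event drive it, and each parse_* method returns one event while installing the next state callable. A minimal sketch of that control flow (TinyParser and its two states are invented for illustration and emit tuples instead of Event objects):

class TinyParser:
    # Each state callable emits one event and installs the next state,
    # mirroring the parse_* methods of the removed Parser class.
    def __init__(self, scalars):
        self.scalars = list(scalars)
        self.state = self.parse_stream_start
        self.current_event = None

    def parse_stream_start(self):
        self.state = self.parse_scalar
        return ('stream-start',)

    def parse_scalar(self):
        if self.scalars:
            return ('scalar', self.scalars.pop(0))
        self.state = None
        return ('stream-end',)

    def get_event(self):
        if self.current_event is None and self.state:
            self.current_event = self.state()
        event, self.current_event = self.current_event, None
        return event

parser = TinyParser(['a', 'b'])
events = []
while True:
    event = parser.get_event()
    events.append(event)
    if event == ('stream-end',):
        break
print(events)  # [('stream-start',), ('scalar', 'a'), ('scalar', 'b'), ('stream-end',)]
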
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/reader.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
deleted file mode 100644
index 8d422954e..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/reader.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# SPDX-License-Identifier: MIT
-# This module contains abstractions for the input stream. You don't have to
-# looks further, there are no pretty code.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-# reader.peek(length=1) - return the next `length` characters
-# reader.forward(length=1) - move the current position to `length` characters.
-# reader.index - the number of the current character.
-# reader.line, stream.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, str):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to unicode,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `str` object,
- # - a `unicode` object,
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = u''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, unicode):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+u'\0'
- elif isinstance(stream, str):
- self.name = "<string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = ''
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in u'\n\x85\u2028\u2029' \
- or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
- self.line += 1
- self.column = 0
- elif ch != u'\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and len(self.raw_buffer) < 2:
- self.update_raw()
- if not isinstance(self.raw_buffer, unicode):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
- NON_PRINTABLE = re.compile(u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError, exc:
- character = exc.object[exc.start]
- if self.stream is not None:
- position = self.stream_pointer-len(self.raw_buffer)+exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += u'\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=1024):
- data = self.stream.read(size)
- if data:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- else:
- self.eof = True
-
-#try:
-# import psyco
-# psyco.bind(Reader)
-#except ImportError:
-# pass
-
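
The removed Reader detects the stream encoding from a byte-order mark before decoding: explicit checks for the UTF-16 BOMs, with UTF-8 as the fallback. The core of that logic as a small standalone function (detect_encoding is an illustrative name, not part of the removed module):

import codecs

def detect_encoding(raw: bytes) -> str:
    # Same BOM checks as the removed Reader.determine_encoding():
    # UTF-16 BOMs are recognised explicitly, everything else is UTF-8.
    if raw.startswith(codecs.BOM_UTF16_LE):
        return 'utf-16-le'
    if raw.startswith(codecs.BOM_UTF16_BE):
        return 'utf-16-be'
    return 'utf-8'

assert detect_encoding(codecs.BOM_UTF16_LE + 'a: 1'.encode('utf-16-le')) == 'utf-16-le'
assert detect_encoding(b'a: 1') == 'utf-8'
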
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/representer.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
deleted file mode 100644
index 0a1404eca..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/representer.py
+++ /dev/null
@@ -1,485 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError']
-
-from error import *
-from nodes import *
-
-import datetime
-
-import sys, copy_reg, types
-
-class RepresenterError(YAMLError):
- pass
-
-class BaseRepresenter(object):
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
- def __init__(self, default_style=None, default_flow_style=None):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def get_classobj_bases(self, cls):
- bases = [cls]
- for base in cls.__bases__:
- bases.extend(self.get_classobj_bases(base))
- return bases
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- #if node is None:
- # raise RepresenterError("recursive objects are not allowed: %r" % data)
- return node
- #self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if type(data) is types.InstanceType:
- data_types = self.get_classobj_bases(data.__class__)+list(data_types)
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, unicode(data))
- #if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- def add_representer(cls, data_type, representer):
- if not 'yaml_representers' in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
- add_representer = classmethod(add_representer)
-
- def add_multi_representer(cls, data_type, representer):
- if not 'yaml_multi_representers' in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
- add_multi_representer = classmethod(add_multi_representer)
-
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = mapping.items()
- mapping.sort()
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
- if data in [None, ()]:
- return True
- if isinstance(data, (str, unicode, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:null',
- u'null')
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:str', data)
-
- def represent_bool(self, data):
- if data:
- value = u'true'
- else:
- value = u'false'
- return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- def represent_long(self, data):
- return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = u'.nan'
- elif data == self.inf_value:
- value = u'.inf'
- elif data == -self.inf_value:
- value = u'-.inf'
- else:
- value = unicode(repr(data)).lower()
- # Note that in some cases `repr(data)` represents a float number
- # without the decimal parts. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if u'.' not in value and u'e' in value:
- value = value.replace(u'e', u'.0e', 1)
- return self.represent_scalar(u'tag:yaml.org,2002:float', value)
-
- def represent_list(self, data):
- #pairs = (len(data) > 0 and isinstance(data, list))
- #if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- #if not pairs:
- return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
- #value = []
- #for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping(u'tag:yaml.org,2002:map', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping(u'tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = unicode(data.isoformat())
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = unicode(data.isoformat(' '))
- return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
- raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(unicode,
- SafeRepresenter.represent_unicode)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(long,
- SafeRepresenter.represent_long)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
- def represent_str(self, data):
- tag = None
- style = None
- try:
- data = unicode(data, 'ascii')
- tag = u'tag:yaml.org,2002:str'
- except UnicodeDecodeError:
- try:
- data = unicode(data, 'utf-8')
- tag = u'tag:yaml.org,2002:python/str'
- except UnicodeDecodeError:
- data = data.encode('base64')
- tag = u'tag:yaml.org,2002:binary'
- style = '|'
- return self.represent_scalar(tag, data, style=style)
-
- def represent_unicode(self, data):
- tag = None
- try:
- data.encode('ascii')
- tag = u'tag:yaml.org,2002:python/unicode'
- except UnicodeEncodeError:
- tag = u'tag:yaml.org,2002:str'
- return self.represent_scalar(tag, data)
-
- def represent_long(self, data):
- tag = u'tag:yaml.org,2002:int'
- if int(data) is not data:
- tag = u'tag:yaml.org,2002:python/long'
- return self.represent_scalar(tag, unicode(data))
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = u'%r' % data.real
- elif data.real == 0.0:
- data = u'%rj' % data.imag
- elif data.imag > 0:
- data = u'%r+%rj' % (data.real, data.imag)
- else:
- data = u'%r%rj' % (data.real, data.imag)
- return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = u'%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
-
- def represent_module(self, data):
- return self.represent_scalar(
- u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
-
- def represent_instance(self, data):
- # For instances of classic classes, we use __getinitargs__ and
- # __getstate__ to serialize the data.
-
- # If data.__getinitargs__ exists, the object must be reconstructed by
- # calling cls(**args), where args is a tuple returned by
- # __getinitargs__. Otherwise, the cls.__init__ method should never be
- # called and the class instance is created by instantiating a trivial
- # class and assigning to the instance's __class__ variable.
-
- # If data.__getstate__ exists, it returns the state of the object.
- # Otherwise, the state of the object is data.__dict__.
-
- # We produce either a !!python/object or !!python/object/new node.
- # If data.__getinitargs__ does not exist and state is a dictionary, we
- # produce a !!python/object node . Otherwise we produce a
- # !!python/object/new node.
-
- cls = data.__class__
- class_name = u'%s.%s' % (cls.__module__, cls.__name__)
- args = None
- state = None
- if hasattr(data, '__getinitargs__'):
- args = list(data.__getinitargs__())
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__
- if args is None and isinstance(state, dict):
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+class_name, state)
- if isinstance(state, dict) and not state:
- return self.represent_sequence(
- u'tag:yaml.org,2002:python/object/new:'+class_name, args)
- value = {}
- if args:
- value['args'] = args
- value['state'] = state
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object/new:'+class_name, value)
-
- def represent_object(self, data):
- # We use __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
- # For reconstructing, we calls function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copy_reg.dispatch_table:
- reduce = copy_reg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
- raise RepresenterError("cannot represent object: %r" % data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = u'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = u'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = u'%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(str,
- Representer.represent_str)
-
-Representer.add_representer(unicode,
- Representer.represent_unicode)
-
-Representer.add_representer(long,
- Representer.represent_long)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-Representer.add_representer(types.ClassType,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-Representer.add_multi_representer(types.InstanceType,
- Representer.represent_instance)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
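
The representers above dispatch on the exact Python type through class-level registries, and each subclass copies the registry the first time it registers something so the parent class stays untouched. A compact sketch of that copy-on-write dispatch (class names and the tuple return value are chosen for illustration):

class BaseRepresenter:
    representers = {}

    @classmethod
    def add_representer(cls, data_type, fn):
        # Copy-on-write: give this class its own registry on first use,
        # as the removed add_representer classmethod does.
        if 'representers' not in cls.__dict__:
            cls.representers = cls.representers.copy()
        cls.representers[data_type] = fn

    def represent(self, data):
        return self.representers[type(data)](self, data)

class SafeRepresenter(BaseRepresenter):
    pass

SafeRepresenter.add_representer(int, lambda self, d: ('tag:yaml.org,2002:int', str(d)))
SafeRepresenter.add_representer(bool, lambda self, d: ('tag:yaml.org,2002:bool', 'true' if d else 'false'))

print(SafeRepresenter().represent(7))       # ('tag:yaml.org,2002:int', '7')
assert BaseRepresenter.representers == {}   # parent registry untouched
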
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
deleted file mode 100644
index 49922debf..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/resolver.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from error import *
-from nodes import *
-
-import re
-
-class ResolverError(YAMLError):
- pass
-
-class BaseResolver(object):
-
- DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
-
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
-
- def __init__(self):
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
-
- def add_implicit_resolver(cls, tag, regexp, first):
- if not 'yaml_implicit_resolvers' in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
- if first is None:
- first = [None]
- for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
- add_implicit_resolver = classmethod(add_implicit_resolver)
-
- def add_path_resolver(cls, tag, path, kind=None):
- # Note: `add_path_resolver` is experimental. The API could be changed.
- # `new_path` is a pattern that is matched against the path from the
- # root to the node that is being considered. `node_path` elements are
- # tuples `(node_check, index_check)`. `node_check` is a node class:
- # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
- # matches any kind of a node. `index_check` could be `None`, a boolean
- # value, a string value, or a number. `None` and `False` match against
- # any _value_ of sequence and mapping nodes. `True` matches against
- # any _key_ of a mapping node. A string `index_check` matches against
- # a mapping value that corresponds to a scalar key which content is
- # equal to the `index_check` value. An integer `index_check` matches
- # against a sequence value with the index equal to `index_check`.
- if not 'yaml_path_resolvers' in cls.__dict__:
- cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
- for element in path:
- if isinstance(element, (list, tuple)):
- if len(element) == 2:
- node_check, index_check = element
- elif len(element) == 1:
- node_check = element[0]
- index_check = True
- else:
- raise ResolverError("Invalid path element: %s" % element)
- else:
- node_check = None
- index_check = element
- if node_check is str:
- node_check = ScalarNode
- elif node_check is list:
- node_check = SequenceNode
- elif node_check is dict:
- node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, basestring) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (basestring, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
- new_path.append((node_check, index_check))
- if kind is str:
- kind = ScalarNode
- elif kind is list:
- kind = SequenceNode
- elif kind is dict:
- kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
- cls.yaml_path_resolvers[tuple(new_path), kind] = tag
- add_path_resolver = classmethod(add_path_resolver)
-
- def descend_resolver(self, current_node, current_index):
- if not self.yaml_path_resolvers:
- return
- exact_paths = {}
- prefix_paths = []
- if current_node:
- depth = len(self.resolver_prefix_paths)
- for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
- if len(path) > depth:
- prefix_paths.append((path, kind))
- else:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- for path, kind in self.yaml_path_resolvers:
- if not path:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- prefix_paths.append((path, kind))
- self.resolver_exact_paths.append(exact_paths)
- self.resolver_prefix_paths.append(prefix_paths)
-
- def ascend_resolver(self):
- if not self.yaml_path_resolvers:
- return
- self.resolver_exact_paths.pop()
- self.resolver_prefix_paths.pop()
-
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, basestring):
- if current_node.tag != node_check:
- return
- elif node_check is not None:
- if not isinstance(current_node, node_check):
- return
- if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, basestring):
- if not (isinstance(current_index, ScalarNode)
- and index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check, bool):
- if index_check != current_index:
- return
- return True
-
- def resolve(self, kind, value, implicit):
- if kind is ScalarNode and implicit[0]:
- if value == u'':
- resolvers = self.yaml_implicit_resolvers.get(u'', [])
- else:
- resolvers = self.yaml_implicit_resolvers.get(value[0], [])
- resolvers += self.yaml_implicit_resolvers.get(None, [])
- for tag, regexp in resolvers:
- if regexp.match(value):
- return tag
- implicit = implicit[1]
- if self.yaml_path_resolvers:
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
- if kind is ScalarNode:
- return self.DEFAULT_SCALAR_TAG
- elif kind is SequenceNode:
- return self.DEFAULT_SEQUENCE_TAG
- elif kind is MappingNode:
- return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
- pass
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:bool',
- re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list(u'yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:float',
- re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
- |\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
- |[-+]?\.(?:inf|Inf|INF)
- |\.(?:nan|NaN|NAN))$''', re.X),
- list(u'-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:int',
- re.compile(ur'''^(?:[-+]?0b[0-1_]+
- |[-+]?0[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list(u'-+0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:merge',
- re.compile(ur'^(?:<<)$'),
- [u'<'])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:null',
- re.compile(ur'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- [u'~', u'n', u'N', u''])
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:timestamp',
- re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
- (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list(u'0123456789'))
-
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:value',
- re.compile(ur'^(?:=)$'),
- [u'='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- u'tag:yaml.org,2002:yaml',
- re.compile(ur'^(?:!|&|\*)$'),
- list(u'!&*'))
-
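
The implicit resolvers registered above are indexed by the characters a plain scalar may start with, so only a handful of regular expressions are tried per value. The same lookup scheme in a self-contained form (simplified patterns, illustrative function names):

import re

implicit = {}

def add_implicit(tag, regexp, first_chars):
    # Group candidate regexes by possible first character, as the removed
    # add_implicit_resolver classmethod does.
    for ch in first_chars:
        implicit.setdefault(ch, []).append((tag, regexp))

add_implicit('tag:yaml.org,2002:int', re.compile(r'^[-+]?(0|[1-9][0-9_]*)$'), '-+0123456789')
add_implicit('tag:yaml.org,2002:bool', re.compile(r'^(true|false)$'), 'tf')

def resolve(value, default='tag:yaml.org,2002:str'):
    for tag, regexp in implicit.get(value[:1], []):
        if regexp.match(value):
            return tag
    return default

assert resolve('42') == 'tag:yaml.org,2002:int'
assert resolve('true') == 'tag:yaml.org,2002:bool'
assert resolve('hello') == 'tag:yaml.org,2002:str'
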
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
deleted file mode 100644
index 971da6127..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/scanner.py
+++ /dev/null
@@ -1,1458 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from error import MarkedYAMLError
-from tokens import *
-
-class ScannerError(MarkedYAMLError):
- pass
-
-class SimpleKey(object):
- # See below simple keys treatment.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-class Scanner(object):
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
- # Reader do the dirty work of checking for BOM and converting the
- # input data to Unicode. It also adds NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer.
-
- # Had we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
- # is `flow_level`; there can be no more that one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
- # Return the next token, but do not delete if from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
- # Eat whitespaces and comments until we reach the next token.
- self.scan_to_next_token()
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == u'\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == u'%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == u'-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == u'.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- #if ch == u'\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == u'[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == u'{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == u']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == u'}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == u',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == u'-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == u'?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == u':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == u'*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == u'&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == u'!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == u'|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == u'>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == u'\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == u'\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token"
- % ch.encode('utf-8'), self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in self.possible_simple_keys.keys():
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
-                            "could not find expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
- # A simple key is required only if it is the first token in the current
- # line. Therefore it is always allowed.
- assert self.allow_simple_key or not required
-
-        # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
-                        "could not find expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- ## In flow context, tokens should respect indentation.
- ## Actually the condition should be `self.indent >= column` according to
- ## the spec. But this condition will prohibit intuitively correct
- ## constructions such as
- ## key : {
- ## }
- #if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
-        #        "invalid indentation or unclosed '[' or '{'",
- # self.get_mark())
-
- # In the flow context, indentation is ignored. We make the scanner less
-        # restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
-
- def fetch_stream_end(self):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
-        # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys. Note that there could not be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
-            # Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
- # Do we determine a simple key?
- if self.flow_level in self.possible_simple_keys:
-
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be a part of a complex key.
- else:
-
- # Block context needs additional checks.
-            # (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'---' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == u'...' \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
-
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except the
- # '-' character) because we want the flow context to be space
- # independent.
- ch = self.peek()
- return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
- or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
- and (ch == u'-' or (not self.flow_level and ch in u'?:')))
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered as a part
- # of the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == u'\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == u'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == u'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or '.', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r"
- % self.peek().encode('utf-8'),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not (u'0' <= ch <= u'9'):
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 0
- while u'0' <= self.peek(length) <= u'9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == u' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != u' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict characters for anchors and
- # aliases. This may lead to problems, for instance, the document:
- # [ *alias, value ]
-        # can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == u'*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == u'<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != u'>':
- raise ScannerError("while parsing a tag", start_mark,
- "expected '>', but found %r" % self.peek().encode('utf-8'),
- self.get_mark())
- self.forward()
- elif ch in u'\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = u'!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in u'\0 \r\n\x85\u2028\u2029':
- if ch == u'!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = u'!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = u'!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = u''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != u'\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in u' \t'
- length = 0
- while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != u'\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == u'\n' \
- and leading_non_space and self.peek() not in u' \t':
- if not breaks:
- chunks.append(u' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- #if folded and line_break == u'\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(u' ')
- # else:
- # chunks.append(line_break)
- #else:
- # chunks.append(line_break)
- else:
- break
-
- # Chomp the tail.
- if chomping is not False:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
-
- # We are done.
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- elif ch in u'0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in u'+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- return chomping, increment
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == u' ':
- self.forward()
- if self.peek() == u'#':
- while self.peek() not in u'\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in u'\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r"
- % ch.encode('utf-8'), self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() != u' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- while self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == u' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
-        # Note that we relax indentation rules for quoted scalars. Quoted
-        # scalars don't need to adhere to indentation because " and ' clearly
-        # mark the beginning and the end of them. Therefore we are less
-        # restrictive than the specification requires. We only need to check
- # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- u'0': u'\0',
- u'a': u'\x07',
- u'b': u'\x08',
- u't': u'\x09',
- u'\t': u'\x09',
- u'n': u'\x0A',
- u'v': u'\x0B',
- u'f': u'\x0C',
- u'r': u'\x0D',
- u'e': u'\x1B',
- u' ': u'\x20',
- u'\"': u'\"',
- u'\\': u'\\',
- u'N': u'\x85',
- u'_': u'\xA0',
- u'L': u'\u2028',
- u'P': u'\u2029',
- }
-
- ESCAPE_CODES = {
- u'x': 2,
- u'u': 4,
- u'U': 8,
- }
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == u'\'' and self.peek(1) == u'\'':
- chunks.append(u'\'')
- self.forward(2)
- elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == u'\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                                "expected escape sequence of %d hexadecimal numbers, but found %r" %
- (length, self.peek(k).encode('utf-8')), self.get_mark())
- code = int(self.prefix(length), 16)
- chunks.append(unichr(code))
- self.forward(length)
- elif ch in u'\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
- else:
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in u' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == u'\0':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected document separator", self.get_mark())
- while self.peek() in u' \t':
- self.forward()
- if self.peek() in u'\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
- # plain scalars in the flow context cannot contain ',', ':' and '?'.
- # We also keep track of the `allow_simple_key` flag here.
-        # Indentation rules are loosened for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- #if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == u'#':
- break
- while True:
- ch = self.peek(length)
- if ch in u'\0 \t\r\n\x85\u2028\u2029' \
- or (not self.flow_level and ch == u':' and
- self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
- or (self.flow_level and ch in u',:?[]{}'):
- break
- length += 1
- # It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == u':'
- and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
- self.forward(length)
- raise ScannerError("while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.get_mark(),
- "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == u'#' \
- or (not self.flow_level and self.column < indent):
- break
- return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in u' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in u'\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in u' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != u'\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(u' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
-        # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != u'!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != u' ':
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_':
- length += 1
- ch = self.peek(length)
- if ch != u'!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch.encode('utf-8'),
- self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
- if ch == u'%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % ch.encode('utf-8'),
- self.get_mark())
- return u''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- bytes = []
- mark = self.get_mark()
- while self.peek() == u'%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in u'0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a %s" % name, start_mark,
-                        "expected URI escape sequence of 2 hexadecimal numbers, but found %r" %
- (self.peek(k).encode('utf-8')), self.get_mark())
- bytes.append(chr(int(self.prefix(2), 16)))
- self.forward(2)
- try:
- value = unicode(''.join(bytes), 'utf-8')
- except UnicodeDecodeError, exc:
- raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
-        # '\u2029' : '\u2029'
- # default : ''
- ch = self.peek()
- if ch in u'\r\n\x85':
- if self.prefix(2) == u'\r\n':
- self.forward(2)
- else:
- self.forward()
- return u'\n'
- elif ch in u'\u2028\u2029':
- self.forward()
- return ch
- return u''
-
-#try:
-# import psyco
-# psyco.bind(Scanner)
-#except ImportError:
-# pass
-
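
For context on the scanner deleted above: its public surface is check_token() / peek_token() / get_token(), which upstream PyYAML wraps in the yaml.scan() helper. The following is a minimal sketch only, and it assumes the upstream PyYAML package installed as `yaml` rather than the bundled python_modules.pyyaml2 copy:

    import yaml

    # Each item yielded by scan() is one of the Token classes listed at the
    # top of scanner.py (STREAM-START, KEY, VALUE, SCALAR, ...).
    for token in yaml.scan("retries: [1, 2]\n"):
        print(type(token).__name__)

    # Roughly: StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken,
    # ValueToken, FlowSequenceStartToken, ScalarToken, FlowEntryToken,
    # ScalarToken, FlowSequenceEndToken, BlockEndToken, StreamEndToken
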
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
deleted file mode 100644
index 15fdbb0c0..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/serializer.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Serializer', 'SerializerError']
-
-from error import YAMLError
-from events import *
-from nodes import *
-
-class SerializerError(YAMLError):
- pass
-
-class Serializer(object):
-
- ANCHOR_TEMPLATE = u'id%03d'
-
- def __init__(self, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- #def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version, tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- self.anchors[node] = None
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag
- == self.resolve(SequenceNode, node.value, True))
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent())
- elif isinstance(node, MappingNode):
- implicit = (node.tag
- == self.resolve(MappingNode, node.value, True))
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent())
- self.ascend_resolver()
-
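
As background for the Serializer deleted above: it walks a node graph, assigns anchors to shared nodes, and emits events; upstream PyYAML wires it into Dumper and exposes it as yaml.serialize() / yaml.serialize_all(). A minimal sketch, again assuming the upstream `yaml` package rather than the bundled copy:

    import yaml

    # Build a tiny representation tree by hand and serialize it back to text.
    node = yaml.SequenceNode(
        tag='tag:yaml.org,2002:seq',
        value=[yaml.ScalarNode('tag:yaml.org,2002:str', 'a'),
               yaml.ScalarNode('tag:yaml.org,2002:str', 'b')])
    print(yaml.serialize(node))    # roughly "- a\n- b\n"
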
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py b/src/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
deleted file mode 100644
index c5c4fb116..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml2/tokens.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
- def __init__(self, name, value, start_mark, end_mark):
- self.name = name
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-class StreamStartToken(Token):
- id = '<stream start>'
- def __init__(self, start_mark=None, end_mark=None,
- encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-class KeyToken(Token):
- id = '?'
-
-class ValueToken(Token):
- id = ':'
-
-class BlockEntryToken(Token):
- id = '-'
-
-class FlowEntryToken(Token):
- id = ','
-
-class AliasToken(Token):
- id = '<alias>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class AnchorToken(Token):
- id = '<anchor>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class TagToken(Token):
- id = '<tag>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class ScalarToken(Token):
- id = '<scalar>'
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- self.value = value
- self.plain = plain
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
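
The token classes deleted above are plain value holders: start/end marks plus whatever the scanner attaches (value, plain, style, encoding), with a shared __repr__ that prints every non-mark attribute. A small illustrative sketch, assuming the upstream `yaml` package:

    import yaml

    tokens = list(yaml.scan("answer: 42\n"))
    # ScalarToken carries .value, .plain and .style; filter the stream by type.
    print([t.value for t in tokens if isinstance(t, yaml.ScalarToken)])
    # roughly: ['answer', '42']
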
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
deleted file mode 100644
index a884b33cf..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/__init__.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-from .error import *
-
-from .tokens import *
-from .events import *
-from .nodes import *
-
-from .loader import *
-from .dumper import *
-
-__version__ = '3.11'
-try:
- from .cyaml import *
- __with_libyaml__ = True
-except ImportError:
- __with_libyaml__ = False
-
-import io
-
-def scan(stream, Loader=Loader):
- """
- Scan a YAML stream and produce scanning tokens.
- """
- loader = Loader(stream)
- try:
- while loader.check_token():
- yield loader.get_token()
- finally:
- loader.dispose()
-
-def parse(stream, Loader=Loader):
- """
- Parse a YAML stream and produce parsing events.
- """
- loader = Loader(stream)
- try:
- while loader.check_event():
- yield loader.get_event()
- finally:
- loader.dispose()
-
-def compose(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding representation tree.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_node()
- finally:
- loader.dispose()
-
-def compose_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding representation trees.
- """
- loader = Loader(stream)
- try:
- while loader.check_node():
- yield loader.get_node()
- finally:
- loader.dispose()
-
-def load(stream, Loader=Loader):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- """
- loader = Loader(stream)
- try:
- return loader.get_single_data()
- finally:
- loader.dispose()
-
-def load_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- """
- loader = Loader(stream)
- try:
- while loader.check_data():
- yield loader.get_data()
- finally:
- loader.dispose()
-
-def safe_load(stream):
- """
- Parse the first YAML document in a stream
- and produce the corresponding Python object.
- Resolve only basic YAML tags.
- """
- return load(stream, SafeLoader)
-
-def safe_load_all(stream):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
- Resolve only basic YAML tags.
- """
- return load_all(stream, SafeLoader)
-
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- """
- Emit YAML parsing events into a stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- stream = io.StringIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- try:
- for event in events:
- dumper.emit(event)
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of representation trees into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- stream = io.StringIO()
- else:
- stream = io.BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def serialize(node, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a representation tree into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return serialize_all([node], stream, Dumper=Dumper, **kwds)
-
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- """
- Serialize a sequence of Python objects into a YAML stream.
- If stream is None, return the produced string instead.
- """
- getvalue = None
- if stream is None:
- if encoding is None:
- stream = io.StringIO()
- else:
- stream = io.BytesIO()
- getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
- try:
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
- finally:
- dumper.dispose()
- if getvalue:
- return getvalue()
-
-def dump(data, stream=None, Dumper=Dumper, **kwds):
- """
- Serialize a Python object into a YAML stream.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=Dumper, **kwds)
-
-def safe_dump_all(documents, stream=None, **kwds):
- """
- Serialize a sequence of Python objects into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
-
-def safe_dump(data, stream=None, **kwds):
- """
- Serialize a Python object into a YAML stream.
- Produce only basic YAML tags.
- If stream is None, return the produced string instead.
- """
- return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-
-def add_implicit_resolver(tag, regexp, first=None,
- Loader=Loader, Dumper=Dumper):
- """
- Add an implicit scalar detector.
- If an implicit scalar value matches the given regexp,
- the corresponding tag is assigned to the scalar.
- first is a sequence of possible initial characters or None.
- """
- Loader.add_implicit_resolver(tag, regexp, first)
- Dumper.add_implicit_resolver(tag, regexp, first)
-
-def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
- """
- Add a path based resolver for the given tag.
- A path is a list of keys that forms a path
- to a node in the representation tree.
- Keys can be string values, integers, or None.
- """
- Loader.add_path_resolver(tag, path, kind)
- Dumper.add_path_resolver(tag, path, kind)
-
-def add_constructor(tag, constructor, Loader=Loader):
- """
- Add a constructor for the given tag.
- Constructor is a function that accepts a Loader instance
- and a node object and produces the corresponding Python object.
- """
- Loader.add_constructor(tag, constructor)
-
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
- """
- Add a multi-constructor for the given tag prefix.
- Multi-constructor is called for a node if its tag starts with tag_prefix.
- Multi-constructor accepts a Loader instance, a tag suffix,
- and a node object and produces the corresponding Python object.
- """
- Loader.add_multi_constructor(tag_prefix, multi_constructor)
-
-def add_representer(data_type, representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Representer is a function accepting a Dumper instance
- and an instance of the given data type
- and producing the corresponding representation node.
- """
- Dumper.add_representer(data_type, representer)
-
-def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
- """
- Add a representer for the given type.
- Multi-representer is a function accepting a Dumper instance
- and an instance of the given data type or subtype
- and producing the corresponding representation node.
- """
- Dumper.add_multi_representer(data_type, multi_representer)
-
-class YAMLObjectMetaclass(type):
- """
- The metaclass for YAMLObject.
- """
- def __init__(cls, name, bases, kwds):
- super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
- if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
- cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
- cls.yaml_dumper.add_representer(cls, cls.to_yaml)
-
-class YAMLObject(metaclass=YAMLObjectMetaclass):
- """
- An object that can dump itself to a YAML stream
- and load itself from a YAML stream.
- """
-
- __slots__ = () # no direct instantiation, so allow immutable subclasses
-
- yaml_loader = Loader
- yaml_dumper = Dumper
-
- yaml_tag = None
- yaml_flow_style = None
-
- @classmethod
- def from_yaml(cls, loader, node):
- """
- Convert a representation node to a Python object.
- """
- return loader.construct_yaml_object(node, cls)
-
- @classmethod
- def to_yaml(cls, dumper, data):
- """
- Convert a Python object to a representation node.
- """
- return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
-
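
As background for the module deleted above: the YAMLObject machinery at its end registers a constructor and a representer for a class keyed by its yaml_tag, so instances round-trip through load and dump. A sketch of typical use, assuming the upstream `yaml` package; the Monster class and its fields are made up for illustration:

    import yaml

    class Monster(yaml.YAMLObject):
        yaml_tag = '!Monster'
        yaml_loader = yaml.SafeLoader   # register with SafeLoader so safe_load accepts the tag

        def __init__(self, name, hp):
            self.name = name
            self.hp = hp

    m = yaml.safe_load("!Monster {name: Cave Troll, hp: 8}")
    print(m.name, m.hp)                 # roughly: Cave Troll 8
    print(yaml.dump(m))                 # emits a !Monster tagged mapping
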
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/composer.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
deleted file mode 100644
index c418bba91..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/composer.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Composer', 'ComposerError']
-
-from .error import MarkedYAMLError
-from .events import *
-from .nodes import *
-
-class ComposerError(MarkedYAMLError):
- pass
-
-class Composer:
-
- def __init__(self):
- self.anchors = {}
-
- def check_node(self):
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
-
-        # Are there more documents available?
- return not self.check_event(StreamEndEvent)
-
- def get_node(self):
- # Get the root node of the next document.
- if not self.check_event(StreamEndEvent):
- return self.compose_document()
-
- def get_single_node(self):
- # Drop the STREAM-START event.
- self.get_event()
-
- # Compose a document if the stream is not empty.
- document = None
- if not self.check_event(StreamEndEvent):
- document = self.compose_document()
-
- # Ensure that the stream contains no more documents.
- if not self.check_event(StreamEndEvent):
- event = self.get_event()
- raise ComposerError("expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
-
- # Drop the STREAM-END event.
- self.get_event()
-
- return document
-
- def compose_document(self):
- # Drop the DOCUMENT-START event.
- self.get_event()
-
- # Compose the root node.
- node = self.compose_node(None, None)
-
- # Drop the DOCUMENT-END event.
- self.get_event()
-
- self.anchors = {}
- return node
-
- def compose_node(self, parent, index):
- if self.check_event(AliasEvent):
- event = self.get_event()
- anchor = event.anchor
- if anchor not in self.anchors:
- raise ComposerError(None, None, "found undefined alias %r"
- % anchor, event.start_mark)
- return self.anchors[anchor]
- event = self.peek_event()
- anchor = event.anchor
- if anchor is not None:
- if anchor in self.anchors:
-                raise ComposerError("found duplicate anchor %r; first occurrence"
-                        % anchor, self.anchors[anchor].start_mark,
-                        "second occurrence", event.start_mark)
- self.descend_resolver(parent, index)
- if self.check_event(ScalarEvent):
- node = self.compose_scalar_node(anchor)
- elif self.check_event(SequenceStartEvent):
- node = self.compose_sequence_node(anchor)
- elif self.check_event(MappingStartEvent):
- node = self.compose_mapping_node(anchor)
- self.ascend_resolver()
- return node
-
- def compose_scalar_node(self, anchor):
- event = self.get_event()
- tag = event.tag
- if tag is None or tag == '!':
- tag = self.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style)
- if anchor is not None:
- self.anchors[anchor] = node
- return node
-
- def compose_sequence_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == '!':
- tag = self.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- index = 0
- while not self.check_event(SequenceEndEvent):
- node.value.append(self.compose_node(node, index))
- index += 1
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
- def compose_mapping_node(self, anchor):
- start_event = self.get_event()
- tag = start_event.tag
- if tag is None or tag == '!':
- tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style)
- if anchor is not None:
- self.anchors[anchor] = node
- while not self.check_event(MappingEndEvent):
- #key_event = self.peek_event()
- item_key = self.compose_node(node, None)
- #if item_key in node.value:
- # raise ComposerError("while composing a mapping", start_event.start_mark,
- # "found duplicate key", key_event.start_mark)
- item_value = self.compose_node(node, item_key)
- #node.value[item_key] = item_value
- node.value.append((item_key, item_value))
- end_event = self.get_event()
- node.end_mark = end_event.end_mark
- return node
-
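
The Composer deleted above sits between the parser and the constructor: it turns the event stream into a node graph and resolves anchors and aliases. A small sketch using the standard top-level helper (the document text is made up):

    import yaml

    document = "defaults: &base {retries: 3, timeout: 10}\njob: *base\n"

    root = yaml.compose(document)   # stops after the Composer: returns a MappingNode
    (k1, v1), (k2, v2) = root.value
    print(k1.value, k2.value)       # defaults job
    print(v1 is v2)                 # True: the alias reuses the anchored node object
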
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
deleted file mode 100644
index ee09a7a7e..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/constructor.py
+++ /dev/null
@@ -1,687 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
- 'ConstructorError']
-
-from .error import *
-from .nodes import *
-
-import collections, datetime, base64, binascii, re, sys, types
-
-class ConstructorError(MarkedYAMLError):
- pass
-
-class BaseConstructor:
-
- yaml_constructors = {}
- yaml_multi_constructors = {}
-
- def __init__(self):
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.state_generators = []
- self.deep_construct = False
-
- def check_data(self):
-        # Are there more documents available?
- return self.check_node()
-
- def get_data(self):
- # Construct and return the next document.
- if self.check_node():
- return self.construct_document(self.get_node())
-
- def get_single_data(self):
- # Ensure that the stream contains a single document and construct it.
- node = self.get_single_node()
- if node is not None:
- return self.construct_document(node)
- return None
-
- def construct_document(self, node):
- data = self.construct_object(node)
- while self.state_generators:
- state_generators = self.state_generators
- self.state_generators = []
- for generator in state_generators:
- for dummy in generator:
- pass
- self.constructed_objects = {}
- self.recursive_objects = {}
- self.deep_construct = False
- return data
-
- def construct_object(self, node, deep=False):
- if node in self.constructed_objects:
- return self.constructed_objects[node]
- if deep:
- old_deep = self.deep_construct
- self.deep_construct = True
- if node in self.recursive_objects:
- raise ConstructorError(None, None,
- "found unconstructable recursive node", node.start_mark)
- self.recursive_objects[node] = None
- constructor = None
- tag_suffix = None
- if node.tag in self.yaml_constructors:
- constructor = self.yaml_constructors[node.tag]
- else:
- for tag_prefix in self.yaml_multi_constructors:
- if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
- constructor = self.yaml_multi_constructors[tag_prefix]
- break
- else:
- if None in self.yaml_multi_constructors:
- tag_suffix = node.tag
- constructor = self.yaml_multi_constructors[None]
- elif None in self.yaml_constructors:
- constructor = self.yaml_constructors[None]
- elif isinstance(node, ScalarNode):
- constructor = self.__class__.construct_scalar
- elif isinstance(node, SequenceNode):
- constructor = self.__class__.construct_sequence
- elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping
- if tag_suffix is None:
- data = constructor(self, node)
- else:
- data = constructor(self, tag_suffix, node)
- if isinstance(data, types.GeneratorType):
- generator = data
- data = next(generator)
- if self.deep_construct:
- for dummy in generator:
- pass
- else:
- self.state_generators.append(generator)
- self.constructed_objects[node] = data
- del self.recursive_objects[node]
- if deep:
- self.deep_construct = old_deep
- return data
-
- def construct_scalar(self, node):
- if not isinstance(node, ScalarNode):
- raise ConstructorError(None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
- return node.value
-
- def construct_sequence(self, node, deep=False):
- if not isinstance(node, SequenceNode):
- raise ConstructorError(None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
-
- def construct_mapping(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- mapping = {}
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- if not isinstance(key, collections.Hashable):
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
- value = self.construct_object(value_node, deep=deep)
- mapping[key] = value
- return mapping
-
- def construct_pairs(self, node, deep=False):
- if not isinstance(node, MappingNode):
- raise ConstructorError(None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
- pairs = []
- for key_node, value_node in node.value:
- key = self.construct_object(key_node, deep=deep)
- value = self.construct_object(value_node, deep=deep)
- pairs.append((key, value))
- return pairs
-
- @classmethod
- def add_constructor(cls, tag, constructor):
- if not 'yaml_constructors' in cls.__dict__:
- cls.yaml_constructors = cls.yaml_constructors.copy()
- cls.yaml_constructors[tag] = constructor
-
- @classmethod
- def add_multi_constructor(cls, tag_prefix, multi_constructor):
- if not 'yaml_multi_constructors' in cls.__dict__:
- cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
- cls.yaml_multi_constructors[tag_prefix] = multi_constructor
-
-class SafeConstructor(BaseConstructor):
-
- def construct_scalar(self, node):
- if isinstance(node, MappingNode):
- for key_node, value_node in node.value:
- if key_node.tag == 'tag:yaml.org,2002:value':
- return self.construct_scalar(value_node)
- return super().construct_scalar(node)
-
- def flatten_mapping(self, node):
- merge = []
- index = 0
- while index < len(node.value):
- key_node, value_node = node.value[index]
- if key_node.tag == 'tag:yaml.org,2002:merge':
- del node.value[index]
- if isinstance(value_node, MappingNode):
- self.flatten_mapping(value_node)
- merge.extend(value_node.value)
- elif isinstance(value_node, SequenceNode):
- submerge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- self.flatten_mapping(subnode)
- submerge.append(subnode.value)
- submerge.reverse()
- for value in submerge:
- merge.extend(value)
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == 'tag:yaml.org,2002:value':
- key_node.tag = 'tag:yaml.org,2002:str'
- index += 1
- else:
- index += 1
- if merge:
- node.value = merge + node.value
-
- def construct_mapping(self, node, deep=False):
- if isinstance(node, MappingNode):
- self.flatten_mapping(node)
- return super().construct_mapping(node, deep=deep)
-
- def construct_yaml_null(self, node):
- self.construct_scalar(node)
- return None
-
- bool_values = {
- 'yes': True,
- 'no': False,
- 'true': True,
- 'false': False,
- 'on': True,
- 'off': False,
- }
-
- def construct_yaml_bool(self, node):
- value = self.construct_scalar(node)
- return self.bool_values[value.lower()]
-
- def construct_yaml_int(self, node):
- value = self.construct_scalar(node)
- value = value.replace('_', '')
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '0':
- return 0
- elif value.startswith('0b'):
- return sign*int(value[2:], 2)
- elif value.startswith('0x'):
- return sign*int(value[2:], 16)
- elif value[0] == '0':
- return sign*int(value, 8)
- elif ':' in value:
- digits = [int(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*int(value)
-
- inf_value = 1e300
- while inf_value != inf_value*inf_value:
- inf_value *= inf_value
- nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
-
- def construct_yaml_float(self, node):
- value = self.construct_scalar(node)
- value = value.replace('_', '').lower()
- sign = +1
- if value[0] == '-':
- sign = -1
- if value[0] in '+-':
- value = value[1:]
- if value == '.inf':
- return sign*self.inf_value
- elif value == '.nan':
- return self.nan_value
- elif ':' in value:
- digits = [float(part) for part in value.split(':')]
- digits.reverse()
- base = 1
- value = 0.0
- for digit in digits:
- value += digit*base
- base *= 60
- return sign*value
- else:
- return sign*float(value)
-
- def construct_yaml_binary(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- timestamp_regexp = re.compile(
- r'''^(?P<year>[0-9][0-9][0-9][0-9])
- -(?P<month>[0-9][0-9]?)
- -(?P<day>[0-9][0-9]?)
- (?:(?:[Tt]|[ \t]+)
- (?P<hour>[0-9][0-9]?)
- :(?P<minute>[0-9][0-9])
- :(?P<second>[0-9][0-9])
- (?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
-
- def construct_yaml_timestamp(self, node):
- value = self.construct_scalar(node)
- match = self.timestamp_regexp.match(node.value)
- values = match.groupdict()
- year = int(values['year'])
- month = int(values['month'])
- day = int(values['day'])
- if not values['hour']:
- return datetime.date(year, month, day)
- hour = int(values['hour'])
- minute = int(values['minute'])
- second = int(values['second'])
- fraction = 0
- if values['fraction']:
- fraction = values['fraction'][:6]
- while len(fraction) < 6:
- fraction += '0'
- fraction = int(fraction)
- delta = None
- if values['tz_sign']:
- tz_hour = int(values['tz_hour'])
- tz_minute = int(values['tz_minute'] or 0)
- delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
- if values['tz_sign'] == '-':
- delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second, fraction)
- if delta:
- data -= delta
- return data
-
- def construct_yaml_omap(self, node):
- # Note: we do not check for duplicate keys, because it's too
- # CPU-expensive.
- omap = []
- yield omap
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- omap.append((key, value))
-
- def construct_yaml_pairs(self, node):
- # Note: the same code as `construct_yaml_omap`.
- pairs = []
- yield pairs
- if not isinstance(node, SequenceNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
- for subnode in node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" % subnode.id,
- subnode.start_mark)
- if len(subnode.value) != 1:
- raise ConstructorError("while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" % len(subnode.value),
- subnode.start_mark)
- key_node, value_node = subnode.value[0]
- key = self.construct_object(key_node)
- value = self.construct_object(value_node)
- pairs.append((key, value))
-
- def construct_yaml_set(self, node):
- data = set()
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_str(self, node):
- return self.construct_scalar(node)
-
- def construct_yaml_seq(self, node):
- data = []
- yield data
- data.extend(self.construct_sequence(node))
-
- def construct_yaml_map(self, node):
- data = {}
- yield data
- value = self.construct_mapping(node)
- data.update(value)
-
- def construct_yaml_object(self, node, cls):
- data = cls.__new__(cls)
- yield data
- if hasattr(data, '__setstate__'):
- state = self.construct_mapping(node, deep=True)
- data.__setstate__(state)
- else:
- state = self.construct_mapping(node)
- data.__dict__.update(state)
-
- def construct_undefined(self, node):
- raise ConstructorError(None, None,
- "could not determine a constructor for the tag %r" % node.tag,
- node.start_mark)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
-
-SafeConstructor.add_constructor(
- 'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
-
-SafeConstructor.add_constructor(None,
- SafeConstructor.construct_undefined)
-
-class Constructor(SafeConstructor):
-
- def construct_python_str(self, node):
- return self.construct_scalar(node)
-
- def construct_python_unicode(self, node):
- return self.construct_scalar(node)
-
- def construct_python_bytes(self, node):
- try:
- value = self.construct_scalar(node).encode('ascii')
- except UnicodeEncodeError as exc:
- raise ConstructorError(None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
- try:
- if hasattr(base64, 'decodebytes'):
- return base64.decodebytes(value)
- else:
- return base64.decodestring(value)
- except binascii.Error as exc:
- raise ConstructorError(None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
-
- def construct_python_long(self, node):
- return self.construct_yaml_int(node)
-
- def construct_python_complex(self, node):
- return complex(self.construct_scalar(node))
-
- def construct_python_tuple(self, node):
- return tuple(self.construct_sequence(node))
-
- def find_python_module(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
- try:
- __import__(name)
- except ImportError as exc:
- raise ConstructorError("while constructing a Python module", mark,
- "cannot find module %r (%s)" % (name, exc), mark)
- return sys.modules[name]
-
- def find_python_name(self, name, mark):
- if not name:
- raise ConstructorError("while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
- if '.' in name:
- module_name, object_name = name.rsplit('.', 1)
- else:
- module_name = 'builtins'
- object_name = name
- try:
- __import__(module_name)
- except ImportError as exc:
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find module %r (%s)" % (module_name, exc), mark)
- module = sys.modules[module_name]
- if not hasattr(module, object_name):
- raise ConstructorError("while constructing a Python object", mark,
- "cannot find %r in the module %r"
- % (object_name, module.__name__), mark)
- return getattr(module, object_name)
-
- def construct_python_name(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % value, node.start_mark)
- return self.find_python_name(suffix, node.start_mark)
-
- def construct_python_module(self, suffix, node):
- value = self.construct_scalar(node)
- if value:
- raise ConstructorError("while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % value, node.start_mark)
- return self.find_python_module(suffix, node.start_mark)
-
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
- if not args:
- args = []
- if not kwds:
- kwds = {}
- cls = self.find_python_name(suffix, node.start_mark)
- if newobj and isinstance(cls, type):
- return cls.__new__(cls, *args, **kwds)
- else:
- return cls(*args, **kwds)
-
- def set_python_instance_state(self, instance, state):
- if hasattr(instance, '__setstate__'):
- instance.__setstate__(state)
- else:
- slotstate = {}
- if isinstance(state, tuple) and len(state) == 2:
- state, slotstate = state
- if hasattr(instance, '__dict__'):
- instance.__dict__.update(state)
- elif state:
- slotstate.update(state)
- for key, value in slotstate.items():
-                    setattr(instance, key, value)
-
- def construct_python_object(self, suffix, node):
- # Format:
- # !!python/object:module.name { ... state ... }
- instance = self.make_python_instance(suffix, node, newobj=True)
- yield instance
- deep = hasattr(instance, '__setstate__')
- state = self.construct_mapping(node, deep=deep)
- self.set_python_instance_state(instance, state)
-
- def construct_python_object_apply(self, suffix, node, newobj=False):
- # Format:
- # !!python/object/apply # (or !!python/object/new)
- # args: [ ... arguments ... ]
- # kwds: { ... keywords ... }
- # state: ... state ...
- # listitems: [ ... listitems ... ]
- # dictitems: { ... dictitems ... }
- # or short format:
- # !!python/object/apply [ ... arguments ... ]
- # The difference between !!python/object/apply and !!python/object/new
- # is how an object is created, check make_python_instance for details.
- if isinstance(node, SequenceNode):
- args = self.construct_sequence(node, deep=True)
- kwds = {}
- state = {}
- listitems = []
- dictitems = {}
- else:
- value = self.construct_mapping(node, deep=True)
- args = value.get('args', [])
- kwds = value.get('kwds', {})
- state = value.get('state', {})
- listitems = value.get('listitems', [])
- dictitems = value.get('dictitems', {})
- instance = self.make_python_instance(suffix, node, args, kwds, newobj)
- if state:
- self.set_python_instance_state(instance, state)
- if listitems:
- instance.extend(listitems)
- if dictitems:
- for key in dictitems:
- instance[key] = dictitems[key]
- return instance
-
- def construct_python_object_new(self, suffix, node):
- return self.construct_python_object_apply(suffix, node, newobj=True)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/bytes',
- Constructor.construct_python_bytes)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
-
-Constructor.add_constructor(
- 'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
-
-Constructor.add_multi_constructor(
- 'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
-
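
The add_constructor() registry removed above is what maps a YAML tag to a Python callable that builds the native object. A minimal sketch with the standard PyYAML API (the !celsius tag and the conversion helper are invented for illustration):

    import yaml

    def celsius_to_kelvin(loader, node):
        # construct_scalar() returns the raw scalar string for the tagged node.
        return float(loader.construct_scalar(node)) + 273.15

    yaml.add_constructor('!celsius', celsius_to_kelvin, Loader=yaml.SafeLoader)

    print(yaml.safe_load('boiling: !celsius 100'))   # {'boiling': 373.15}
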
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
deleted file mode 100644
index e6c16d894..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/cyaml.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
- 'CBaseDumper', 'CSafeDumper', 'CDumper']
-
-from _yaml import CParser, CEmitter
-
-from .constructor import *
-
-from .serializer import *
-from .representer import *
-
-from .resolver import *
-
-class CBaseLoader(CParser, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class CSafeLoader(CParser, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class CLoader(CParser, Constructor, Resolver):
-
- def __init__(self, stream):
- CParser.__init__(self, stream)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
-class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class CDumper(CEmitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
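
The cyaml classes deleted above are only importable when the libyaml C extension is present, so code that wants the speed-up usually falls back to the pure-Python classes. A common pattern (not something this tree mandates):

    import yaml

    try:
        from yaml import CSafeLoader as SafeLoader   # libyaml-backed scanner/parser
    except ImportError:
        from yaml import SafeLoader                  # pure-Python fallback

    print(yaml.load('answer: 42', Loader=SafeLoader))   # {'answer': 42}
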
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
deleted file mode 100644
index ba590c6e6..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/dumper.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
-
-from .emitter import *
-from .serializer import *
-from .representer import *
-from .resolver import *
-
-class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
-class Dumper(Emitter, Serializer, Representer, Resolver):
-
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start, explicit_end=explicit_end,
- version=version, tags=tags)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
- Resolver.__init__(self)
-
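
The Dumper classes deleted above mostly wire Emitter, Serializer, Representer and Resolver together and fan their constructor arguments out of yaml.dump(). A short sketch of that (the sample dict is illustrative):

    import yaml

    doc = {'plugin': 'python.d', 'jobs': ['apcupsd', 'example']}

    print(yaml.dump(doc,
                    Dumper=yaml.SafeDumper,
                    default_flow_style=False,   # block-style mappings/sequences
                    indent=2,
                    width=80,
                    explicit_start=True))       # emit the leading '---'
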
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
deleted file mode 100644
index d4be65a8e..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/emitter.py
+++ /dev/null
@@ -1,1138 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Emitter expects events obeying the following grammar:
-# stream ::= STREAM-START document* STREAM-END
-# document ::= DOCUMENT-START node DOCUMENT-END
-# node ::= SCALAR | sequence | mapping
-# sequence ::= SEQUENCE-START node* SEQUENCE-END
-# mapping ::= MAPPING-START (node node)* MAPPING-END
-
-__all__ = ['Emitter', 'EmitterError']
-
-from .error import YAMLError
-from .events import *
-
-class EmitterError(YAMLError):
- pass
-
-class ScalarAnalysis:
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
- self.scalar = scalar
- self.empty = empty
- self.multiline = multiline
- self.allow_flow_plain = allow_flow_plain
- self.allow_block_plain = allow_block_plain
- self.allow_single_quoted = allow_single_quoted
- self.allow_double_quoted = allow_double_quoted
- self.allow_block = allow_block
-
-class Emitter:
-
- DEFAULT_TAG_PREFIXES = {
- '!' : '!',
- 'tag:yaml.org,2002:' : '!!',
- }
-
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
-
- # The stream should have the methods `write` and possibly `flush`.
- self.stream = stream
-
-        # Encoding can be overridden by STREAM-START.
- self.encoding = None
-
- # Emitter is a state machine with a stack of states to handle nested
- # structures.
- self.states = []
- self.state = self.expect_stream_start
-
- # Current event and the event queue.
- self.events = []
- self.event = None
-
- # The current indentation level and the stack of previous indents.
- self.indents = []
- self.indent = None
-
- # Flow level.
- self.flow_level = 0
-
- # Contexts.
- self.root_context = False
- self.sequence_context = False
- self.mapping_context = False
- self.simple_key_context = False
-
- # Characteristics of the last emitted character:
- # - current position.
- # - is it a whitespace?
- # - is it an indention character
- # (indentation space, '-', '?', or ':')?
- self.line = 0
- self.column = 0
- self.whitespace = True
- self.indention = True
-
- # Whether the document requires an explicit document indicator
- self.open_ended = False
-
- # Formatting details.
- self.canonical = canonical
- self.allow_unicode = allow_unicode
- self.best_indent = 2
- if indent and 1 < indent < 10:
- self.best_indent = indent
- self.best_width = 80
- if width and width > self.best_indent*2:
- self.best_width = width
- self.best_line_break = '\n'
- if line_break in ['\r', '\n', '\r\n']:
- self.best_line_break = line_break
-
- # Tag prefixes.
- self.tag_prefixes = None
-
- # Prepared anchor and tag.
- self.prepared_anchor = None
- self.prepared_tag = None
-
- # Scalar analysis and style.
- self.analysis = None
- self.style = None
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def emit(self, event):
- self.events.append(event)
- while not self.need_more_events():
- self.event = self.events.pop(0)
- self.state()
- self.event = None
-
- # In some cases, we wait for a few next events before emitting.
-
- def need_more_events(self):
- if not self.events:
- return True
- event = self.events[0]
- if isinstance(event, DocumentStartEvent):
- return self.need_events(1)
- elif isinstance(event, SequenceStartEvent):
- return self.need_events(2)
- elif isinstance(event, MappingStartEvent):
- return self.need_events(3)
- else:
- return False
-
- def need_events(self, count):
- level = 0
- for event in self.events[1:]:
- if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
- level += 1
- elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
- level -= 1
- elif isinstance(event, StreamEndEvent):
- level = -1
- if level < 0:
- return False
- return (len(self.events) < count+1)
-
- def increase_indent(self, flow=False, indentless=False):
- self.indents.append(self.indent)
- if self.indent is None:
- if flow:
- self.indent = self.best_indent
- else:
- self.indent = 0
- elif not indentless:
- self.indent += self.best_indent
-
- # States.
-
- # Stream handlers.
-
- def expect_stream_start(self):
- if isinstance(self.event, StreamStartEvent):
- if self.event.encoding and not hasattr(self.stream, 'encoding'):
- self.encoding = self.event.encoding
- self.write_stream_start()
- self.state = self.expect_first_document_start
- else:
- raise EmitterError("expected StreamStartEvent, but got %s"
- % self.event)
-
- def expect_nothing(self):
- raise EmitterError("expected nothing, but got %s" % self.event)
-
- # Document handlers.
-
- def expect_first_document_start(self):
- return self.expect_document_start(first=True)
-
- def expect_document_start(self, first=False):
- if isinstance(self.event, DocumentStartEvent):
- if (self.event.version or self.event.tags) and self.open_ended:
- self.write_indicator('...', True)
- self.write_indent()
- if self.event.version:
- version_text = self.prepare_version(self.event.version)
- self.write_version_directive(version_text)
- self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
- if self.event.tags:
- handles = sorted(self.event.tags.keys())
- for handle in handles:
- prefix = self.event.tags[handle]
- self.tag_prefixes[prefix] = handle
- handle_text = self.prepare_tag_handle(handle)
- prefix_text = self.prepare_tag_prefix(prefix)
- self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and not self.event.explicit and not self.canonical
- and not self.event.version and not self.event.tags
- and not self.check_empty_document())
- if not implicit:
- self.write_indent()
- self.write_indicator('---', True)
- if self.canonical:
- self.write_indent()
- self.state = self.expect_document_root
- elif isinstance(self.event, StreamEndEvent):
- if self.open_ended:
- self.write_indicator('...', True)
- self.write_indent()
- self.write_stream_end()
- self.state = self.expect_nothing
- else:
- raise EmitterError("expected DocumentStartEvent, but got %s"
- % self.event)
-
- def expect_document_end(self):
- if isinstance(self.event, DocumentEndEvent):
- self.write_indent()
- if self.event.explicit:
- self.write_indicator('...', True)
- self.write_indent()
- self.flush_stream()
- self.state = self.expect_document_start
- else:
- raise EmitterError("expected DocumentEndEvent, but got %s"
- % self.event)
-
- def expect_document_root(self):
- self.states.append(self.expect_document_end)
- self.expect_node(root=True)
-
- # Node handlers.
-
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
- self.root_context = root
- self.sequence_context = sequence
- self.mapping_context = mapping
- self.simple_key_context = simple_key
- if isinstance(self.event, AliasEvent):
- self.expect_alias()
- elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
- self.process_anchor('&')
- self.process_tag()
- if isinstance(self.event, ScalarEvent):
- self.expect_scalar()
- elif isinstance(self.event, SequenceStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_sequence():
- self.expect_flow_sequence()
- else:
- self.expect_block_sequence()
- elif isinstance(self.event, MappingStartEvent):
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
- self.expect_flow_mapping()
- else:
- self.expect_block_mapping()
- else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
-
- def expect_alias(self):
- if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
- self.process_anchor('*')
- self.state = self.states.pop()
-
- def expect_scalar(self):
- self.increase_indent(flow=True)
- self.process_scalar()
- self.indent = self.indents.pop()
- self.state = self.states.pop()
-
- # Flow sequence handlers.
-
- def expect_flow_sequence(self):
- self.write_indicator('[', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_sequence_item
-
- def expect_first_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator(']', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- def expect_flow_sequence_item(self):
- if isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(',', False)
- self.write_indent()
- self.write_indicator(']', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.states.append(self.expect_flow_sequence_item)
- self.expect_node(sequence=True)
-
- # Flow mapping handlers.
-
- def expect_flow_mapping(self):
- self.write_indicator('{', True, whitespace=True)
- self.flow_level += 1
- self.increase_indent(flow=True)
- self.state = self.expect_first_flow_mapping_key
-
- def expect_first_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- self.write_indicator('}', False)
- self.state = self.states.pop()
- else:
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_key(self):
- if isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.flow_level -= 1
- if self.canonical:
- self.write_indicator(',', False)
- self.write_indent()
- self.write_indicator('}', False)
- self.state = self.states.pop()
- else:
- self.write_indicator(',', False)
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- if not self.canonical and self.check_simple_key():
- self.states.append(self.expect_flow_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True)
- self.states.append(self.expect_flow_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_simple_value(self):
- self.write_indicator(':', False)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_flow_mapping_value(self):
- if self.canonical or self.column > self.best_width:
- self.write_indent()
- self.write_indicator(':', True)
- self.states.append(self.expect_flow_mapping_key)
- self.expect_node(mapping=True)
-
- # Block sequence handlers.
-
- def expect_block_sequence(self):
- indentless = (self.mapping_context and not self.indention)
- self.increase_indent(flow=False, indentless=indentless)
- self.state = self.expect_first_block_sequence_item
-
- def expect_first_block_sequence_item(self):
- return self.expect_block_sequence_item(first=True)
-
- def expect_block_sequence_item(self, first=False):
- if not first and isinstance(self.event, SequenceEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- self.write_indicator('-', True, indention=True)
- self.states.append(self.expect_block_sequence_item)
- self.expect_node(sequence=True)
-
- # Block mapping handlers.
-
- def expect_block_mapping(self):
- self.increase_indent(flow=False)
- self.state = self.expect_first_block_mapping_key
-
- def expect_first_block_mapping_key(self):
- return self.expect_block_mapping_key(first=True)
-
- def expect_block_mapping_key(self, first=False):
- if not first and isinstance(self.event, MappingEndEvent):
- self.indent = self.indents.pop()
- self.state = self.states.pop()
- else:
- self.write_indent()
- if self.check_simple_key():
- self.states.append(self.expect_block_mapping_simple_value)
- self.expect_node(mapping=True, simple_key=True)
- else:
- self.write_indicator('?', True, indention=True)
- self.states.append(self.expect_block_mapping_value)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_simple_value(self):
- self.write_indicator(':', False)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- def expect_block_mapping_value(self):
- self.write_indent()
- self.write_indicator(':', True, indention=True)
- self.states.append(self.expect_block_mapping_key)
- self.expect_node(mapping=True)
-
- # Checkers.
-
- def check_empty_sequence(self):
- return (isinstance(self.event, SequenceStartEvent) and self.events
- and isinstance(self.events[0], SequenceEndEvent))
-
- def check_empty_mapping(self):
- return (isinstance(self.event, MappingStartEvent) and self.events
- and isinstance(self.events[0], MappingEndEvent))
-
- def check_empty_document(self):
- if not isinstance(self.event, DocumentStartEvent) or not self.events:
- return False
- event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None
- and event.tag is None and event.implicit and event.value == '')
-
- def check_simple_key(self):
- length = 0
- if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(self.event.tag)
- length += len(self.prepared_tag)
- if isinstance(self.event, ScalarEvent):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- length += len(self.analysis.scalar)
- return (length < 128 and (isinstance(self.event, AliasEvent)
- or (isinstance(self.event, ScalarEvent)
- and not self.analysis.empty and not self.analysis.multiline)
- or self.check_empty_sequence() or self.check_empty_mapping()))
-
- # Anchor, Tag, and Scalar processors.
-
- def process_anchor(self, indicator):
- if self.event.anchor is None:
- self.prepared_anchor = None
- return
- if self.prepared_anchor is None:
- self.prepared_anchor = self.prepare_anchor(self.event.anchor)
- if self.prepared_anchor:
- self.write_indicator(indicator+self.prepared_anchor, True)
- self.prepared_anchor = None
-
- def process_tag(self):
- tag = self.event.tag
- if isinstance(self.event, ScalarEvent):
- if self.style is None:
- self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0])
- or (self.style != '' and self.event.implicit[1]))):
- self.prepared_tag = None
- return
- if self.event.implicit[0] and tag is None:
- tag = '!'
- self.prepared_tag = None
- else:
- if (not self.canonical or tag is None) and self.event.implicit:
- self.prepared_tag = None
- return
- if tag is None:
- raise EmitterError("tag is not specified")
- if self.prepared_tag is None:
- self.prepared_tag = self.prepare_tag(tag)
- if self.prepared_tag:
- self.write_indicator(self.prepared_tag, True)
- self.prepared_tag = None
-
- def choose_scalar_style(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.event.style == '"' or self.canonical:
- return '"'
- if not self.event.style and self.event.implicit[0]:
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline))
- and (self.flow_level and self.analysis.allow_flow_plain
- or (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
- if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context
- and self.analysis.allow_block):
- return self.event.style
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
- return '"'
-
- def process_scalar(self):
- if self.analysis is None:
- self.analysis = self.analyze_scalar(self.event.value)
- if self.style is None:
- self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
- #if self.analysis.multiline and split \
- # and (not self.style or self.style in '\'\"'):
- # self.write_indent()
- if self.style == '"':
- self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
- self.write_single_quoted(self.analysis.scalar, split)
- elif self.style == '>':
- self.write_folded(self.analysis.scalar)
- elif self.style == '|':
- self.write_literal(self.analysis.scalar)
- else:
- self.write_plain(self.analysis.scalar, split)
- self.analysis = None
- self.style = None
-
- # Analyzers.
-
- def prepare_version(self, version):
- major, minor = version
- if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
- return '%d.%d' % (major, minor)
-
- def prepare_tag_handle(self, handle):
- if not handle:
- raise EmitterError("tag handle must not be empty")
- if handle[0] != '!' or handle[-1] != '!':
- raise EmitterError("tag handle must start and end with '!': %r" % handle)
- for ch in handle[1:-1]:
- if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (ch, handle))
- return handle
-
- def prepare_tag_prefix(self, prefix):
- if not prefix:
- raise EmitterError("tag prefix must not be empty")
- chunks = []
- start = end = 0
- if prefix[0] == '!':
- end = 1
- while end < len(prefix):
- ch = prefix[end]
- if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?!:@&=+$,_.~*\'()[]':
- end += 1
- else:
- if start < end:
- chunks.append(prefix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append('%%%02X' % ord(ch))
- if start < end:
- chunks.append(prefix[start:end])
- return ''.join(chunks)
-
- def prepare_tag(self, tag):
- if not tag:
- raise EmitterError("tag must not be empty")
- if tag == '!':
- return tag
- handle = None
- suffix = tag
- prefixes = sorted(self.tag_prefixes.keys())
- for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == '!' or len(prefix) < len(tag)):
- handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
- chunks = []
- start = end = 0
- while end < len(suffix):
- ch = suffix[end]
- if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?:@&=+$,_.~*\'()[]' \
- or (ch == '!' and handle != '!'):
- end += 1
- else:
- if start < end:
- chunks.append(suffix[start:end])
- start = end = end+1
- data = ch.encode('utf-8')
- for ch in data:
- chunks.append('%%%02X' % ord(ch))
- if start < end:
- chunks.append(suffix[start:end])
- suffix_text = ''.join(chunks)
- if handle:
- return '%s%s' % (handle, suffix_text)
- else:
- return '!<%s>' % suffix_text
-
- def prepare_anchor(self, anchor):
- if not anchor:
- raise EmitterError("anchor must not be empty")
- for ch in anchor:
- if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_'):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (ch, anchor))
- return anchor
-
- def analyze_scalar(self, scalar):
-
- # Empty scalar is a special case.
- if not scalar:
- return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
-
- # Indicators and special characters.
- block_indicators = False
- flow_indicators = False
- line_breaks = False
- special_characters = False
-
- # Important whitespace combinations.
- leading_space = False
- leading_break = False
- trailing_space = False
- trailing_break = False
- break_space = False
- space_break = False
-
- # Check document indicators.
- if scalar.startswith('---') or scalar.startswith('...'):
- block_indicators = True
- flow_indicators = True
-
- # First character or preceded by a whitespace.
- preceeded_by_whitespace = True
-
- # Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
-
- # The previous character is a space.
- previous_space = False
-
- # The previous character is a break.
- previous_break = False
-
- index = 0
- while index < len(scalar):
- ch = scalar[index]
-
- # Check for indicators.
- if index == 0:
- # Leading indicators are special characters.
- if ch in '#,[]{}&*!|>\'\"%@`':
- flow_indicators = True
- block_indicators = True
- if ch in '?:':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == '-' and followed_by_whitespace:
- flow_indicators = True
- block_indicators = True
- else:
- # Some indicators cannot appear within a scalar as well.
- if ch in ',?[]{}':
- flow_indicators = True
- if ch == ':':
- flow_indicators = True
- if followed_by_whitespace:
- block_indicators = True
- if ch == '#' and preceeded_by_whitespace:
- flow_indicators = True
- block_indicators = True
-
- # Check for line breaks, special, and unicode characters.
- if ch in '\n\x85\u2028\u2029':
- line_breaks = True
- if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
- if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
- or '\uE000' <= ch <= '\uFFFD') and ch != '\uFEFF':
- unicode_characters = True
- if not self.allow_unicode:
- special_characters = True
- else:
- special_characters = True
-
- # Detect important whitespace combinations.
- if ch == ' ':
- if index == 0:
- leading_space = True
- if index == len(scalar)-1:
- trailing_space = True
- if previous_break:
- break_space = True
- previous_space = True
- previous_break = False
- elif ch in '\n\x85\u2028\u2029':
- if index == 0:
- leading_break = True
- if index == len(scalar)-1:
- trailing_break = True
- if previous_space:
- space_break = True
- previous_space = False
- previous_break = True
- else:
- previous_space = False
- previous_break = False
-
- # Prepare for the next character.
- index += 1
- preceeded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
- followed_by_whitespace = (index+1 >= len(scalar) or
- scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
-
- # Let's decide what styles are allowed.
- allow_flow_plain = True
- allow_block_plain = True
- allow_single_quoted = True
- allow_double_quoted = True
- allow_block = True
-
- # Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break
- or trailing_space or trailing_break):
- allow_flow_plain = allow_block_plain = False
-
- # We do not permit trailing spaces for block scalars.
- if trailing_space:
- allow_block = False
-
- # Spaces at the beginning of a new line are only acceptable for block
- # scalars.
- if break_space:
- allow_flow_plain = allow_block_plain = allow_single_quoted = False
-
- # Spaces followed by breaks, as well as special character are only
- # allowed for double quoted scalars.
- if space_break or special_characters:
- allow_flow_plain = allow_block_plain = \
- allow_single_quoted = allow_block = False
-
- # Although the plain scalar writer supports breaks, we never emit
- # multiline plain scalars.
- if line_breaks:
- allow_flow_plain = allow_block_plain = False
-
- # Flow indicators are forbidden for flow plain scalars.
- if flow_indicators:
- allow_flow_plain = False
-
- # Block indicators are forbidden for block plain scalars.
- if block_indicators:
- allow_block_plain = False
-
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
-
- # Writers.
-
- def flush_stream(self):
- if hasattr(self.stream, 'flush'):
- self.stream.flush()
-
- def write_stream_start(self):
- # Write BOM if needed.
- if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write('\uFEFF'.encode(self.encoding))
-
- def write_stream_end(self):
- self.flush_stream()
-
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
- if self.whitespace or not need_whitespace:
- data = indicator
- else:
- data = ' '+indicator
- self.whitespace = whitespace
- self.indention = self.indention and indention
- self.column += len(data)
- self.open_ended = False
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_indent(self):
- indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
- self.write_line_break()
- if self.column < indent:
- self.whitespace = True
- data = ' '*(indent-self.column)
- self.column = indent
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_line_break(self, data=None):
- if data is None:
- data = self.best_line_break
- self.whitespace = True
- self.indention = True
- self.line += 1
- self.column = 0
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
-
- def write_version_directive(self, version_text):
- data = '%%YAML %s' % version_text
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- def write_tag_directive(self, handle_text, prefix_text):
- data = '%%TAG %s %s' % (handle_text, prefix_text)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_line_break()
-
- # Scalar streams.
-
- def write_single_quoted(self, text, split=True):
- self.write_indicator('\'', True)
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch is None or ch != ' ':
- if start+1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- if text[start] == '\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch == '\'':
- data = '\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
- if ch is not None:
- spaces = (ch == ' ')
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
- self.write_indicator('\'', False)
-
- ESCAPE_REPLACEMENTS = {
- '\0': '0',
- '\x07': 'a',
- '\x08': 'b',
- '\x09': 't',
- '\x0A': 'n',
- '\x0B': 'v',
- '\x0C': 'f',
- '\x0D': 'r',
- '\x1B': 'e',
- '\"': '\"',
- '\\': '\\',
- '\x85': 'N',
- '\xA0': '_',
- '\u2028': 'L',
- '\u2029': 'P',
- }
-
- def write_double_quoted(self, text, split=True):
- self.write_indicator('"', True)
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
- or not ('\x20' <= ch <= '\x7E'
- or (self.allow_unicode
- and ('\xA0' <= ch <= '\uD7FF'
- or '\uE000' <= ch <= '\uFFFD'))):
- if start < end:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- if ch in self.ESCAPE_REPLACEMENTS:
- data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
- elif ch <= '\xFF':
- data = '\\x%02X' % ord(ch)
- elif ch <= '\uFFFF':
- data = '\\u%04X' % ord(ch)
- else:
- data = '\\U%08X' % ord(ch)
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end+1
- if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
- and self.column+(end-start) > self.best_width and split:
- data = text[start:end]+'\\'
- if start < end:
- start = end
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- if text[start] == ' ':
- data = '\\'
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- end += 1
- self.write_indicator('"', False)
-
- def determine_block_hints(self, text):
- hints = ''
- if text:
- if text[0] in ' \n\x85\u2028\u2029':
- hints += str(self.best_indent)
- if text[-1] not in '\n\x85\u2028\u2029':
- hints += '-'
- elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
- hints += '+'
- return hints
-
- def write_folded(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator('>'+hints, True)
- if hints[-1:] == '+':
- self.open_ended = True
- self.write_line_break()
- leading_space = True
- spaces = False
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != ' ' \
- and text[start] == '\n':
- self.write_line_break()
- leading_space = (ch == ' ')
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- elif spaces:
- if ch != ' ':
- if start+1 == end and self.column > self.best_width:
- self.write_indent()
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in '\n\x85\u2028\u2029')
- spaces = (ch == ' ')
- end += 1
-
- def write_literal(self, text):
- hints = self.determine_block_hints(text)
- self.write_indicator('|'+hints, True)
- if hints[-1:] == '+':
- self.open_ended = True
- self.write_line_break()
- breaks = True
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if breaks:
- if ch is None or ch not in '\n\x85\u2028\u2029':
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- if ch is not None:
- self.write_indent()
- start = end
- else:
- if ch is None or ch in '\n\x85\u2028\u2029':
- data = text[start:end]
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- if ch is None:
- self.write_line_break()
- start = end
- if ch is not None:
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
-
- def write_plain(self, text, split=True):
- if self.root_context:
- self.open_ended = True
- if not text:
- return
- if not self.whitespace:
- data = ' '
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- self.whitespace = False
- self.indention = False
- spaces = False
- breaks = False
- start = end = 0
- while end <= len(text):
- ch = None
- if end < len(text):
- ch = text[end]
- if spaces:
- if ch != ' ':
- if start+1 == end and self.column > self.best_width and split:
- self.write_indent()
- self.whitespace = False
- self.indention = False
- else:
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- elif breaks:
- if ch not in '\n\x85\u2028\u2029':
- if text[start] == '\n':
- self.write_line_break()
- for br in text[start:end]:
- if br == '\n':
- self.write_line_break()
- else:
- self.write_line_break(br)
- self.write_indent()
- self.whitespace = False
- self.indention = False
- start = end
- else:
- if ch is None or ch in ' \n\x85\u2028\u2029':
- data = text[start:end]
- self.column += len(data)
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end
- if ch is not None:
- spaces = (ch == ' ')
- breaks = (ch in '\n\x85\u2028\u2029')
- end += 1
-
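The scalar writers deleted above (write_plain, write_single_quoted, write_double_quoted, write_folded, write_literal) decide how a string is rendered, and ESCAPE_REPLACEMENTS is the table the double-quoted writer consults when it has to escape control characters. A minimal sketch of the resulting behaviour, using the upstream PyYAML package that this vendored pyyaml3 copy mirrors (the sample strings are arbitrary):

import yaml

# Control characters force the double-quoted writer and its escape table
# ('\x07' is emitted as \a, '\t' as \t).
print(yaml.dump({'escaped': 'bell\x07 tab\t'}), end='')

# default_style='|' routes the scalar through write_literal instead.
print(yaml.dump('line one\nline two\n', default_style='|'), end='')
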
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/error.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/error.py
deleted file mode 100644
index 5fec7d449..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/error.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-
-class Mark:
-
- def __init__(self, name, index, line, column, buffer, pointer):
- self.name = name
- self.index = index
- self.line = line
- self.column = column
- self.buffer = buffer
- self.pointer = pointer
-
- def get_snippet(self, indent=4, max_length=75):
- if self.buffer is None:
- return None
- head = ''
- start = self.pointer
- while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
- start -= 1
- if self.pointer-start > max_length/2-1:
- head = ' ... '
- start += 5
- break
- tail = ''
- end = self.pointer
- while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
- end += 1
- if end-self.pointer > max_length/2-1:
- tail = ' ... '
- end -= 5
- break
- snippet = self.buffer[start:end]
- return ' '*indent + head + snippet + tail + '\n' \
- + ' '*(indent+self.pointer-start+len(head)) + '^'
-
- def __str__(self):
- snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line+1, self.column+1)
- if snippet is not None:
- where += ":\n"+snippet
- return where
-
-class YAMLError(Exception):
- pass
-
-class MarkedYAMLError(YAMLError):
-
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None):
- self.context = context
- self.context_mark = context_mark
- self.problem = problem
- self.problem_mark = problem_mark
- self.note = note
-
- def __str__(self):
- lines = []
- if self.context is not None:
- lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None
- or self.context_mark.name != self.problem_mark.name
- or self.context_mark.line != self.problem_mark.line
- or self.context_mark.column != self.problem_mark.column):
- lines.append(str(self.context_mark))
- if self.problem is not None:
- lines.append(self.problem)
- if self.problem_mark is not None:
- lines.append(str(self.problem_mark))
- if self.note is not None:
- lines.append(self.note)
- return '\n'.join(lines)
-
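Mark and MarkedYAMLError, removed above, are what turn a parse failure into the familiar "line N, column M" message with a caret snippet. A small illustration with upstream PyYAML (the broken input is arbitrary):

import yaml

try:
    yaml.safe_load("key: [unclosed")
except yaml.YAMLError as exc:
    print(exc)  # a MarkedYAMLError subclass; message built from Mark.get_snippet()
    mark = getattr(exc, 'problem_mark', None)
    if mark is not None:
        print('line', mark.line + 1, 'column', mark.column + 1)
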
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/events.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/events.py
deleted file mode 100644
index 283452add..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/events.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Abstract classes.
-
-class Event(object):
- def __init__(self, start_mark=None, end_mark=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-class NodeEvent(Event):
- def __init__(self, anchor, start_mark=None, end_mark=None):
- self.anchor = anchor
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class CollectionStartEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class CollectionEndEvent(Event):
- pass
-
-# Implementations.
-
-class StreamStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None, encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndEvent(Event):
- pass
-
-class DocumentStartEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
- self.version = version
- self.tags = tags
-
-class DocumentEndEvent(Event):
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.explicit = explicit
-
-class AliasEvent(NodeEvent):
- pass
-
-class ScalarEvent(NodeEvent):
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None):
- self.anchor = anchor
- self.tag = tag
- self.implicit = implicit
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class SequenceStartEvent(CollectionStartEvent):
- pass
-
-class SequenceEndEvent(CollectionEndEvent):
- pass
-
-class MappingStartEvent(CollectionStartEvent):
- pass
-
-class MappingEndEvent(CollectionEndEvent):
- pass
-
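The event classes above model the stream that the parser produces and the emitter consumes. A short round-trip sketch with upstream PyYAML (the input document is arbitrary):

import yaml

events = list(yaml.parse("- a\n- b\n", Loader=yaml.SafeLoader))
print([type(e).__name__ for e in events])
# ['StreamStartEvent', 'DocumentStartEvent', 'SequenceStartEvent',
#  'ScalarEvent', 'ScalarEvent', 'SequenceEndEvent', 'DocumentEndEvent', 'StreamEndEvent']

# emit() accepts the same event stream and serializes it back to text.
print(yaml.emit(iter(events)), end='')
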
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/loader.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
deleted file mode 100644
index 7ef6cf815..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/loader.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseLoader', 'SafeLoader', 'Loader']
-
-from .reader import *
-from .scanner import *
-from .parser import *
-from .composer import *
-from .constructor import *
-from .resolver import *
-
-class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- BaseConstructor.__init__(self)
- BaseResolver.__init__(self)
-
-class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- SafeConstructor.__init__(self)
- Resolver.__init__(self)
-
-class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
-
- def __init__(self, stream):
- Reader.__init__(self, stream)
- Scanner.__init__(self)
- Parser.__init__(self)
- Composer.__init__(self)
- Constructor.__init__(self)
- Resolver.__init__(self)
-
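Each loader class above is a mix-in stack: Reader feeds Scanner, Scanner feeds Parser, and so on up to the Constructor that builds Python objects. A minimal sketch of driving that stack directly with upstream PyYAML (the input is arbitrary):

import yaml

loader = yaml.SafeLoader("answer: 42\n")
try:
    data = loader.get_single_data()  # runs read -> scan -> parse -> compose -> construct
finally:
    loader.dispose()
print(data)  # {'answer': 42}
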
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
deleted file mode 100644
index ed2a1b43e..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/nodes.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-class Node(object):
- def __init__(self, tag, value, start_mark, end_mark):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- value = self.value
- #if isinstance(value, list):
- # if len(value) == 0:
- # value = '<empty>'
- # elif len(value) == 1:
- # value = '<1 item>'
- # else:
- # value = '<%d items>' % len(value)
- #else:
- # if len(value) > 75:
- # value = repr(value[:70]+u' ... ')
- # else:
- # value = repr(value)
- value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
-
-class ScalarNode(Node):
- id = 'scalar'
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
-class CollectionNode(Node):
- def __init__(self, tag, value,
- start_mark=None, end_mark=None, flow_style=None):
- self.tag = tag
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.flow_style = flow_style
-
-class SequenceNode(CollectionNode):
- id = 'sequence'
-
-class MappingNode(CollectionNode):
- id = 'mapping'
-
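ScalarNode, SequenceNode and MappingNode above form the intermediate representation between events and Python objects. compose() in upstream PyYAML stops at this node graph, which makes the classes easy to inspect (the sample document is arbitrary):

import yaml

node = yaml.compose("name: netdata\nports: [19999]\n", Loader=yaml.SafeLoader)
print(type(node).__name__, node.tag)   # MappingNode tag:yaml.org,2002:map
for key_node, value_node in node.value:
    print(key_node.value, '->', type(value_node).__name__, value_node.tag)
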
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/parser.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
deleted file mode 100644
index bcec7f994..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/parser.py
+++ /dev/null
@@ -1,590 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# The following YAML grammar is LL(1) and is parsed by a recursive descent
-# parser.
-#
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# implicit_document ::= block_node DOCUMENT-END*
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-# block_node_or_indentless_sequence ::=
-# ALIAS
-# | properties (block_content | indentless_block_sequence)?
-# | block_content
-# | indentless_block_sequence
-# block_node ::= ALIAS
-# | properties block_content?
-# | block_content
-# flow_node ::= ALIAS
-# | properties flow_content?
-# | flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# flow_collection ::= flow_sequence | flow_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-# block_mapping ::= BLOCK-MAPPING_START
-# ((KEY block_node_or_indentless_sequence?)?
-# (VALUE block_node_or_indentless_sequence?)?)*
-# BLOCK-END
-# flow_sequence ::= FLOW-SEQUENCE-START
-# (flow_sequence_entry FLOW-ENTRY)*
-# flow_sequence_entry?
-# FLOW-SEQUENCE-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping ::= FLOW-MAPPING-START
-# (flow_mapping_entry FLOW-ENTRY)*
-# flow_mapping_entry?
-# FLOW-MAPPING-END
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-#
-# FIRST sets:
-#
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-from .events import *
-from .scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser:
- # Since writing a recursive descent parser is a straightforward task, we
- # do not give many comments here.
-
- DEFAULT_TAGS = {
- '!': '!',
- '!!': 'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.states = []
- self.marks = []
- self.state = self.parse_stream_start
-
- def dispose(self):
- # Reset the state attributes (to clear self-references)
- self.states = []
- self.state = None
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- return self.current_event
-
- def get_event(self):
- # Get the next event and proceed further.
- if self.current_event is None:
- if self.state:
- self.current_event = self.state()
- value = self.current_event
- self.current_event = None
- return value
-
- # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
- # implicit_document ::= block_node DOCUMENT-END*
- # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-
- def parse_stream_start(self):
-
- # Parse the stream start.
- token = self.get_token()
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Prepare the next state.
- self.state = self.parse_implicit_document_start
-
- return event
-
- def parse_implicit_document_start(self):
-
- # Parse an implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
-
- # Prepare the next state.
- self.states.append(self.parse_document_end)
- self.state = self.parse_block_node
-
- return event
-
- else:
- return self.parse_document_start()
-
- def parse_document_start(self):
-
- # Parse any extra document end indicators.
- while self.check_token(DocumentEndToken):
- self.get_token()
-
- # Parse an explicit document.
- if not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- self.states.append(self.parse_document_end)
- self.state = self.parse_document_content
- else:
- # Parse the end of the stream.
- token = self.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark)
- assert not self.states
- assert not self.marks
- self.state = None
- return event
-
- def parse_document_end(self):
-
- # Parse the document end.
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- if self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- event = DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Prepare the next state.
- self.state = self.parse_document_start
-
- return event
-
- def parse_document_content(self):
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- event = self.process_empty_scalar(self.peek_token().start_mark)
- self.state = self.states.pop()
- return event
- else:
- return self.parse_block_node()
-
- def process_directives(self):
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == 'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == 'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle,
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- # block_node_or_indentless_sequence ::= ALIAS
- # | properties (block_content | indentless_block_sequence)?
- # | block_content
- # | indentless_block_sequence
- # block_node ::= ALIAS
- # | properties block_content?
- # | block_content
- # flow_node ::= ALIAS
- # | properties flow_content?
- # | flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # flow_collection ::= flow_sequence | flow_mapping
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- if self.check_token(AliasToken):
- token = self.get_token()
- event = AliasEvent(token.value, token.start_mark, token.end_mark)
- self.state = self.states.pop()
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None:
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle,
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == '!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- implicit = (tag is None or tag == '!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- self.state = self.parse_indentless_sequence_entry
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == '!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- self.state = self.states.pop()
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_sequence_first_entry
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- self.state = self.parse_flow_mapping_first_key
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_sequence_first_entry
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- self.state = self.parse_block_mapping_first_key
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), '',
- start_mark, end_mark)
- self.state = self.states.pop()
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- return event
-
- # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-
- def parse_block_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_sequence_entry()
-
- def parse_block_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- self.states.append(self.parse_block_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_block_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-
- def parse_indentless_sequence_entry(self):
- if self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_indentless_sequence_entry)
- return self.parse_block_node()
- else:
- self.state = self.parse_indentless_sequence_entry
- return self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark)
- self.state = self.states.pop()
- return event
-
- # block_mapping ::= BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
-
- def parse_block_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_block_mapping_key()
-
- def parse_block_mapping_key(self):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_value)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_value
- return self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_block_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- self.states.append(self.parse_block_mapping_key)
- return self.parse_block_node_or_indentless_sequence()
- else:
- self.state = self.parse_block_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_block_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while production rules for both flow_sequence_entry and
- # flow_mapping_entry are equal, their interpretations are different.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
- # generates an inline mapping (set syntax).
-
- def parse_flow_sequence_first_entry(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_sequence_entry(first=True)
-
- def parse_flow_sequence_entry(self, first=False):
- if not self.check_token(FlowSequenceEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow sequence", self.marks[-1],
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
-
- if self.check_token(KeyToken):
- token = self.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- self.state = self.parse_flow_sequence_entry_mapping_key
- return event
- elif not self.check_token(FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry)
- return self.parse_flow_node()
- token = self.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_sequence_entry_mapping_key(self):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_value
- return self.process_empty_scalar(token.end_mark)
-
- def parse_flow_sequence_entry_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- self.states.append(self.parse_flow_sequence_entry_mapping_end)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_sequence_entry_mapping_end
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_sequence_entry_mapping_end(self):
- self.state = self.parse_flow_sequence_entry
- token = self.peek_token()
- return MappingEndEvent(token.start_mark, token.start_mark)
-
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
- def parse_flow_mapping_first_key(self):
- token = self.get_token()
- self.marks.append(token.start_mark)
- return self.parse_flow_mapping_key(first=True)
-
- def parse_flow_mapping_key(self, first=False):
- if not self.check_token(FlowMappingEndToken):
- if not first:
- if self.check_token(FlowEntryToken):
- self.get_token()
- else:
- token = self.peek_token()
- raise ParserError("while parsing a flow mapping", self.marks[-1],
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_value)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_value
- return self.process_empty_scalar(token.end_mark)
- elif not self.check_token(FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_empty_value)
- return self.parse_flow_node()
- token = self.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark)
- self.state = self.states.pop()
- self.marks.pop()
- return event
-
- def parse_flow_mapping_value(self):
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- self.states.append(self.parse_flow_mapping_key)
- return self.parse_flow_node()
- else:
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(token.end_mark)
- else:
- self.state = self.parse_flow_mapping_key
- token = self.peek_token()
- return self.process_empty_scalar(token.start_mark)
-
- def parse_flow_mapping_empty_value(self):
- self.state = self.parse_flow_mapping_key
- return self.process_empty_scalar(self.peek_token().start_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), '', mark, mark)
-
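The grammar comments at the top of the deleted parser describe exactly which event sequence a document produces; DocumentStartEvent, for example, records whether the document was explicit and which %YAML directive it carried. A quick check with upstream PyYAML (the document text is arbitrary):

import yaml

doc = "%YAML 1.1\n---\nkey: value\n...\n"
for event in yaml.parse(doc, Loader=yaml.SafeLoader):
    if isinstance(event, yaml.DocumentStartEvent):
        print('start explicit:', event.explicit, 'version:', event.version)
    elif isinstance(event, yaml.DocumentEndEvent):
        print('end explicit:', event.explicit)
# start explicit: True version: (1, 1)
# end explicit: True
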
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/reader.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
deleted file mode 100644
index 0a515fd64..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/reader.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# SPDX-License-Identifier: MIT
-# This module contains abstractions for the input stream. You don't have to
-# look further; there is no pretty code here.
-#
-# We define two classes here.
-#
-# Mark(source, line, column)
-# It's just a record and its only use is producing nice error messages.
-# Parser does not use it for any other purposes.
-#
-# Reader(source, data)
-# Reader determines the encoding of `data` and converts it to unicode.
-# Reader provides the following methods and attributes:
-# reader.peek(length=1) - return the next `length` characters
-# reader.forward(length=1) - advance the current position by `length` characters.
-# reader.index - the number of the current character.
-# reader.line, reader.column - the line and the column of the current character.
-
-__all__ = ['Reader', 'ReaderError']
-
-from .error import YAMLError, Mark
-
-import codecs, re
-
-class ReaderError(YAMLError):
-
- def __init__(self, name, position, character, encoding, reason):
- self.name = name
- self.character = character
- self.position = position
- self.encoding = encoding
- self.reason = reason
-
- def __str__(self):
- if isinstance(self.character, bytes):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
- else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
-
-class Reader(object):
- # Reader:
- # - determines the data encoding and converts it to a unicode string,
- # - checks if characters are in allowed range,
- # - adds '\0' to the end.
-
- # Reader accepts
- # - a `bytes` object,
- # - a `str` object,
- # - a file-like object with its `read` method returning `str`,
- # - a file-like object with its `read` method returning `unicode`.
-
- # Yeah, it's ugly and slow.
-
- def __init__(self, stream):
- self.name = None
- self.stream = None
- self.stream_pointer = 0
- self.eof = True
- self.buffer = ''
- self.pointer = 0
- self.raw_buffer = None
- self.raw_decode = None
- self.encoding = None
- self.index = 0
- self.line = 0
- self.column = 0
- if isinstance(stream, str):
- self.name = "<unicode string>"
- self.check_printable(stream)
- self.buffer = stream+'\0'
- elif isinstance(stream, bytes):
- self.name = "<byte string>"
- self.raw_buffer = stream
- self.determine_encoding()
- else:
- self.stream = stream
- self.name = getattr(stream, 'name', "<file>")
- self.eof = False
- self.raw_buffer = None
- self.determine_encoding()
-
- def peek(self, index=0):
- try:
- return self.buffer[self.pointer+index]
- except IndexError:
- self.update(index+1)
- return self.buffer[self.pointer+index]
-
- def prefix(self, length=1):
- if self.pointer+length >= len(self.buffer):
- self.update(length)
- return self.buffer[self.pointer:self.pointer+length]
-
- def forward(self, length=1):
- if self.pointer+length+1 >= len(self.buffer):
- self.update(length+1)
- while length:
- ch = self.buffer[self.pointer]
- self.pointer += 1
- self.index += 1
- if ch in '\n\x85\u2028\u2029' \
- or (ch == '\r' and self.buffer[self.pointer] != '\n'):
- self.line += 1
- self.column = 0
- elif ch != '\uFEFF':
- self.column += 1
- length -= 1
-
- def get_mark(self):
- if self.stream is None:
- return Mark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
- else:
- return Mark(self.name, self.index, self.line, self.column,
- None, None)
-
- def determine_encoding(self):
- while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
- self.update_raw()
- if isinstance(self.raw_buffer, bytes):
- if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = codecs.utf_16_le_decode
- self.encoding = 'utf-16-le'
- elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = codecs.utf_16_be_decode
- self.encoding = 'utf-16-be'
- else:
- self.raw_decode = codecs.utf_8_decode
- self.encoding = 'utf-8'
- self.update(1)
-
- NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]')
- def check_printable(self, data):
- match = self.NON_PRINTABLE.search(data)
- if match:
- character = match.group()
- position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
-
- def update(self, length):
- if self.raw_buffer is None:
- return
- self.buffer = self.buffer[self.pointer:]
- self.pointer = 0
- while len(self.buffer) < length:
- if not self.eof:
- self.update_raw()
- if self.raw_decode is not None:
- try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
- except UnicodeDecodeError as exc:
- character = self.raw_buffer[exc.start]
- if self.stream is not None:
- position = self.stream_pointer-len(self.raw_buffer)+exc.start
- else:
- position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
- else:
- data = self.raw_buffer
- converted = len(data)
- self.check_printable(data)
- self.buffer += data
- self.raw_buffer = self.raw_buffer[converted:]
- if self.eof:
- self.buffer += '\0'
- self.raw_buffer = None
- break
-
- def update_raw(self, size=4096):
- data = self.stream.read(size)
- if self.raw_buffer is None:
- self.raw_buffer = data
- else:
- self.raw_buffer += data
- self.stream_pointer += len(data)
- if not data:
- self.eof = True
-
-#try:
-# import psyco
-# psyco.bind(Reader)
-#except ImportError:
-# pass
-
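As the header comment says, the Reader's job is to detect the encoding of byte input (via the BOM) and hand back validated unicode. With upstream PyYAML this is visible from the outside: the same document loads identically as str, UTF-8 bytes, or BOM-prefixed UTF-16 bytes (the sample document is arbitrary):

import codecs
import yaml

text = "greeting: hello\n"
for raw in (text,
            text.encode('utf-8'),
            codecs.BOM_UTF16_LE + text.encode('utf-16-le')):
    print(yaml.safe_load(raw))  # {'greeting': 'hello'} each time
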
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/representer.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
deleted file mode 100644
index 756a18dcc..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/representer.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
- 'RepresenterError']
-
-from .error import *
-from .nodes import *
-
-import datetime, sys, copyreg, types, base64
-
-class RepresenterError(YAMLError):
- pass
-
-class BaseRepresenter:
-
- yaml_representers = {}
- yaml_multi_representers = {}
-
- def __init__(self, default_style=None, default_flow_style=None):
- self.default_style = default_style
- self.default_flow_style = default_flow_style
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent(self, data):
- node = self.represent_data(data)
- self.serialize(node)
- self.represented_objects = {}
- self.object_keeper = []
- self.alias_key = None
-
- def represent_data(self, data):
- if self.ignore_aliases(data):
- self.alias_key = None
- else:
- self.alias_key = id(data)
- if self.alias_key is not None:
- if self.alias_key in self.represented_objects:
- node = self.represented_objects[self.alias_key]
- #if node is None:
- # raise RepresenterError("recursive objects are not allowed: %r" % data)
- return node
- #self.represented_objects[alias_key] = None
- self.object_keeper.append(data)
- data_types = type(data).__mro__
- if data_types[0] in self.yaml_representers:
- node = self.yaml_representers[data_types[0]](self, data)
- else:
- for data_type in data_types:
- if data_type in self.yaml_multi_representers:
- node = self.yaml_multi_representers[data_type](self, data)
- break
- else:
- if None in self.yaml_multi_representers:
- node = self.yaml_multi_representers[None](self, data)
- elif None in self.yaml_representers:
- node = self.yaml_representers[None](self, data)
- else:
- node = ScalarNode(None, str(data))
- #if alias_key is not None:
- # self.represented_objects[alias_key] = node
- return node
-
- @classmethod
- def add_representer(cls, data_type, representer):
- if not 'yaml_representers' in cls.__dict__:
- cls.yaml_representers = cls.yaml_representers.copy()
- cls.yaml_representers[data_type] = representer
-
- @classmethod
- def add_multi_representer(cls, data_type, representer):
- if not 'yaml_multi_representers' in cls.__dict__:
- cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
- cls.yaml_multi_representers[data_type] = representer
-
- def represent_scalar(self, tag, value, style=None):
- if style is None:
- style = self.default_style
- node = ScalarNode(tag, value, style=style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- return node
-
- def represent_sequence(self, tag, sequence, flow_style=None):
- value = []
- node = SequenceNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- for item in sequence:
- node_item = self.represent_data(item)
- if not (isinstance(node_item, ScalarNode) and not node_item.style):
- best_style = False
- value.append(node_item)
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def represent_mapping(self, tag, mapping, flow_style=None):
- value = []
- node = MappingNode(tag, value, flow_style=flow_style)
- if self.alias_key is not None:
- self.represented_objects[self.alias_key] = node
- best_style = True
- if hasattr(mapping, 'items'):
- mapping = list(mapping.items())
- try:
- mapping = sorted(mapping)
- except TypeError:
- pass
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- if self.default_flow_style is not None:
- node.flow_style = self.default_flow_style
- else:
- node.flow_style = best_style
- return node
-
- def ignore_aliases(self, data):
- return False
-
-class SafeRepresenter(BaseRepresenter):
-
- def ignore_aliases(self, data):
- if data in [None, ()]:
- return True
- if isinstance(data, (str, bytes, bool, int, float)):
- return True
-
- def represent_none(self, data):
- return self.represent_scalar('tag:yaml.org,2002:null', 'null')
-
- def represent_str(self, data):
- return self.represent_scalar('tag:yaml.org,2002:str', data)
-
- def represent_binary(self, data):
- if hasattr(base64, 'encodebytes'):
- data = base64.encodebytes(data).decode('ascii')
- else:
- data = base64.encodestring(data).decode('ascii')
- return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
-
- def represent_bool(self, data):
- if data:
- value = 'true'
- else:
- value = 'false'
- return self.represent_scalar('tag:yaml.org,2002:bool', value)
-
- def represent_int(self, data):
- return self.represent_scalar('tag:yaml.org,2002:int', str(data))
-
- inf_value = 1e300
- while repr(inf_value) != repr(inf_value*inf_value):
- inf_value *= inf_value
-
- def represent_float(self, data):
- if data != data or (data == 0.0 and data == 1.0):
- value = '.nan'
- elif data == self.inf_value:
- value = '.inf'
- elif data == -self.inf_value:
- value = '-.inf'
- else:
- value = repr(data).lower()
- # Note that in some cases `repr(data)` represents a float number
- # without the decimal parts. For instance:
- # >>> repr(1e17)
- # '1e17'
- # Unfortunately, this is not a valid float representation according
- # to the definition of the `!!float` tag. We fix this by adding
- # '.0' before the 'e' symbol.
- if '.' not in value and 'e' in value:
- value = value.replace('e', '.0e', 1)
- return self.represent_scalar('tag:yaml.org,2002:float', value)
-
- def represent_list(self, data):
- #pairs = (len(data) > 0 and isinstance(data, list))
- #if pairs:
- # for item in data:
- # if not isinstance(item, tuple) or len(item) != 2:
- # pairs = False
- # break
- #if not pairs:
- return self.represent_sequence('tag:yaml.org,2002:seq', data)
- #value = []
- #for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
-
- def represent_dict(self, data):
- return self.represent_mapping('tag:yaml.org,2002:map', data)
-
- def represent_set(self, data):
- value = {}
- for key in data:
- value[key] = None
- return self.represent_mapping('tag:yaml.org,2002:set', value)
-
- def represent_date(self, data):
- value = data.isoformat()
- return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
- def represent_datetime(self, data):
- value = data.isoformat(' ')
- return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
-
- def represent_yaml_object(self, tag, data, cls, flow_style=None):
- if hasattr(data, '__getstate__'):
- state = data.__getstate__()
- else:
- state = data.__dict__.copy()
- return self.represent_mapping(tag, state, flow_style=flow_style)
-
- def represent_undefined(self, data):
- raise RepresenterError("cannot represent an object: %s" % data)
-
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
-
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
-
-SafeRepresenter.add_representer(bytes,
- SafeRepresenter.represent_binary)
-
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
-
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
-
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
-
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
-
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
-
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
-
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
-
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
-
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
-
-class Representer(SafeRepresenter):
-
- def represent_complex(self, data):
- if data.imag == 0.0:
- data = '%r' % data.real
- elif data.real == 0.0:
- data = '%rj' % data.imag
- elif data.imag > 0:
- data = '%r+%rj' % (data.real, data.imag)
- else:
- data = '%r%rj' % (data.real, data.imag)
- return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
-
- def represent_tuple(self, data):
- return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
-
- def represent_name(self, data):
- name = '%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
-
- def represent_module(self, data):
- return self.represent_scalar(
- 'tag:yaml.org,2002:python/module:'+data.__name__, '')
-
- def represent_object(self, data):
- # We use __reduce__ API to save the data. data.__reduce__ returns
- # a tuple of length 2-5:
- # (function, args, state, listitems, dictitems)
-
- # For reconstructing, we call function(*args), then set its state,
- # listitems, and dictitems if they are not None.
-
- # A special case is when function.__name__ == '__newobj__'. In this
- # case we create the object with args[0].__new__(*args).
-
- # Another special case is when __reduce__ returns a string - we don't
- # support it.
-
- # We produce a !!python/object, !!python/object/new or
- # !!python/object/apply node.
-
- cls = type(data)
- if cls in copyreg.dispatch_table:
- reduce = copyreg.dispatch_table[cls](data)
- elif hasattr(data, '__reduce_ex__'):
- reduce = data.__reduce_ex__(2)
- elif hasattr(data, '__reduce__'):
- reduce = data.__reduce__()
- else:
- raise RepresenterError("cannot represent object: %r" % data)
- reduce = (list(reduce)+[None]*5)[:5]
- function, args, state, listitems, dictitems = reduce
- args = list(args)
- if state is None:
- state = {}
- if listitems is not None:
- listitems = list(listitems)
- if dictitems is not None:
- dictitems = dict(dictitems)
- if function.__name__ == '__newobj__':
- function = args[0]
- args = args[1:]
- tag = 'tag:yaml.org,2002:python/object/new:'
- newobj = True
- else:
- tag = 'tag:yaml.org,2002:python/object/apply:'
- newobj = False
- function_name = '%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
- return self.represent_mapping(
- 'tag:yaml.org,2002:python/object:'+function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
- return self.represent_sequence(tag+function_name, args)
- value = {}
- if args:
- value['args'] = args
- if state or not isinstance(state, dict):
- value['state'] = state
- if listitems:
- value['listitems'] = listitems
- if dictitems:
- value['dictitems'] = dictitems
- return self.represent_mapping(tag+function_name, value)
-
-Representer.add_representer(complex,
- Representer.represent_complex)
-
-Representer.add_representer(tuple,
- Representer.represent_tuple)
-
-Representer.add_representer(type,
- Representer.represent_name)
-
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
-
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
-
-Representer.add_multi_representer(object,
- Representer.represent_object)
-
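The representer maps Python types to nodes through the yaml_representers registry populated by the add_representer calls above. A hedged sketch of registering a custom type with upstream PyYAML; the Endpoint class and its fields are made up for the example:

import yaml

class Endpoint:
    def __init__(self, host, port):
        self.host, self.port = host, port

def represent_endpoint(dumper, data):
    # Emit a plain mapping instead of a !!python/object tag.
    return dumper.represent_mapping('tag:yaml.org,2002:map',
                                    {'host': data.host, 'port': data.port})

yaml.add_representer(Endpoint, represent_endpoint)
print(yaml.dump(Endpoint('localhost', 19999)), end='')
# host: localhost
# port: 19999
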
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
deleted file mode 100644
index 50945e04d..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/resolver.py
+++ /dev/null
@@ -1,225 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['BaseResolver', 'Resolver']
-
-from .error import *
-from .nodes import *
-
-import re
-
-class ResolverError(YAMLError):
- pass
-
-class BaseResolver:
-
- DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
- DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
- DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
-
- yaml_implicit_resolvers = {}
- yaml_path_resolvers = {}
-
- def __init__(self):
- self.resolver_exact_paths = []
- self.resolver_prefix_paths = []
-
- @classmethod
- def add_implicit_resolver(cls, tag, regexp, first):
- if not 'yaml_implicit_resolvers' in cls.__dict__:
- cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
- if first is None:
- first = [None]
- for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
-
- @classmethod
- def add_path_resolver(cls, tag, path, kind=None):
- # Note: `add_path_resolver` is experimental. The API could be changed.
- # `new_path` is a pattern that is matched against the path from the
- # root to the node that is being considered. `node_path` elements are
- # tuples `(node_check, index_check)`. `node_check` is a node class:
- # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
- # matches any kind of a node. `index_check` could be `None`, a boolean
- # value, a string value, or a number. `None` and `False` match against
- # any _value_ of sequence and mapping nodes. `True` matches against
- # any _key_ of a mapping node. A string `index_check` matches against
- # a mapping value that corresponds to a scalar key whose content is
- # equal to the `index_check` value. An integer `index_check` matches
- # against a sequence value with the index equal to `index_check`.
- if not 'yaml_path_resolvers' in cls.__dict__:
- cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
- new_path = []
- for element in path:
- if isinstance(element, (list, tuple)):
- if len(element) == 2:
- node_check, index_check = element
- elif len(element) == 1:
- node_check = element[0]
- index_check = True
- else:
- raise ResolverError("Invalid path element: %s" % element)
- else:
- node_check = None
- index_check = element
- if node_check is str:
- node_check = ScalarNode
- elif node_check is list:
- node_check = SequenceNode
- elif node_check is dict:
- node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, str) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (str, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
- new_path.append((node_check, index_check))
- if kind is str:
- kind = ScalarNode
- elif kind is list:
- kind = SequenceNode
- elif kind is dict:
- kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
- cls.yaml_path_resolvers[tuple(new_path), kind] = tag
-
- def descend_resolver(self, current_node, current_index):
- if not self.yaml_path_resolvers:
- return
- exact_paths = {}
- prefix_paths = []
- if current_node:
- depth = len(self.resolver_prefix_paths)
- for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
- if len(path) > depth:
- prefix_paths.append((path, kind))
- else:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- for path, kind in self.yaml_path_resolvers:
- if not path:
- exact_paths[kind] = self.yaml_path_resolvers[path, kind]
- else:
- prefix_paths.append((path, kind))
- self.resolver_exact_paths.append(exact_paths)
- self.resolver_prefix_paths.append(prefix_paths)
-
- def ascend_resolver(self):
- if not self.yaml_path_resolvers:
- return
- self.resolver_exact_paths.pop()
- self.resolver_prefix_paths.pop()
-
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
- node_check, index_check = path[depth-1]
- if isinstance(node_check, str):
- if current_node.tag != node_check:
- return
- elif node_check is not None:
- if not isinstance(current_node, node_check):
- return
- if index_check is True and current_index is not None:
- return
- if (index_check is False or index_check is None) \
- and current_index is None:
- return
- if isinstance(index_check, str):
- if not (isinstance(current_index, ScalarNode)
- and index_check == current_index.value):
- return
- elif isinstance(index_check, int) and not isinstance(index_check, bool):
- if index_check != current_index:
- return
- return True
-
- def resolve(self, kind, value, implicit):
- if kind is ScalarNode and implicit[0]:
- if value == '':
- resolvers = self.yaml_implicit_resolvers.get('', [])
- else:
- resolvers = self.yaml_implicit_resolvers.get(value[0], [])
- resolvers += self.yaml_implicit_resolvers.get(None, [])
- for tag, regexp in resolvers:
- if regexp.match(value):
- return tag
- implicit = implicit[1]
- if self.yaml_path_resolvers:
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
- if kind is ScalarNode:
- return self.DEFAULT_SCALAR_TAG
- elif kind is SequenceNode:
- return self.DEFAULT_SEQUENCE_TAG
- elif kind is MappingNode:
- return self.DEFAULT_MAPPING_TAG
-
-class Resolver(BaseResolver):
- pass
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:bool',
- re.compile(r'''^(?:yes|Yes|YES|no|No|NO
- |true|True|TRUE|false|False|FALSE
- |on|On|ON|off|Off|OFF)$''', re.X),
- list('yYnNtTfFoO'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:float',
- re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
- |\.[0-9_]+(?:[eE][-+][0-9]+)?
- |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
- |[-+]?\.(?:inf|Inf|INF)
- |\.(?:nan|NaN|NAN))$''', re.X),
- list('-+0123456789.'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:int',
- re.compile(r'''^(?:[-+]?0b[0-1_]+
- |[-+]?0[0-7_]+
- |[-+]?(?:0|[1-9][0-9_]*)
- |[-+]?0x[0-9a-fA-F_]+
- |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
- list('-+0123456789'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:merge',
- re.compile(r'^(?:<<)$'),
- ['<'])
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:null',
- re.compile(r'''^(?: ~
- |null|Null|NULL
- | )$''', re.X),
- ['~', 'n', 'N', ''])
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:timestamp',
- re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
- |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
- (?:[Tt]|[ \t]+)[0-9][0-9]?
- :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
- (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
- list('0123456789'))
-
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:value',
- re.compile(r'^(?:=)$'),
- ['='])
-
-# The following resolver is only for documentation purposes. It cannot work
-# because plain scalars cannot start with '!', '&', or '*'.
-Resolver.add_implicit_resolver(
- 'tag:yaml.org,2002:yaml',
- re.compile(r'^(?:!|&|\*)$'),
- list('!&*'))
-
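For reference, the implicit resolvers removed above are what map untagged plain scalars to concrete tags: the scalar's first character selects a candidate list, and the first matching regular expression wins. A minimal sketch against the upstream `yaml` package (an assumption; this bundled copy mirrors it) shows the effect:

```python
# Minimal sketch, assuming the upstream `yaml` package behaves like the
# bundled resolver removed above: plain scalars are resolved by regexp,
# quoted scalars are not.
import yaml

print(yaml.safe_load("a: 123")["a"])     # 123   -> int resolver matched
print(yaml.safe_load("a: yes")["a"])     # True  -> bool resolver matched
print(yaml.safe_load("a: '123'")["a"])   # '123' -> quoted, stays a string
```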
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
deleted file mode 100644
index b55854e8b..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/scanner.py
+++ /dev/null
@@ -1,1449 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-# Scanner produces tokens of the following types:
-# STREAM-START
-# STREAM-END
-# DIRECTIVE(name, value)
-# DOCUMENT-START
-# DOCUMENT-END
-# BLOCK-SEQUENCE-START
-# BLOCK-MAPPING-START
-# BLOCK-END
-# FLOW-SEQUENCE-START
-# FLOW-MAPPING-START
-# FLOW-SEQUENCE-END
-# FLOW-MAPPING-END
-# BLOCK-ENTRY
-# FLOW-ENTRY
-# KEY
-# VALUE
-# ALIAS(value)
-# ANCHOR(value)
-# TAG(value)
-# SCALAR(value, plain, style)
-#
-# Read comments in the Scanner code for more details.
-#
-
-__all__ = ['Scanner', 'ScannerError']
-
-from .error import MarkedYAMLError
-from .tokens import *
-
-class ScannerError(MarkedYAMLError):
- pass
-
-class SimpleKey:
- # See below simple keys treatment.
-
- def __init__(self, token_number, required, index, line, column, mark):
- self.token_number = token_number
- self.required = required
- self.index = index
- self.line = line
- self.column = column
- self.mark = mark
-
-class Scanner:
-
- def __init__(self):
- """Initialize the scanner."""
- # It is assumed that Scanner and Reader will have a common descendant.
-        # Reader does the dirty work of checking for BOM and converting the
- # input data to Unicode. It also adds NUL to the end.
- #
- # Reader supports the following methods
- # self.peek(i=0) # peek the next i-th character
- # self.prefix(l=1) # peek the next l characters
- # self.forward(l=1) # read the next l characters and move the pointer.
-
-        # Have we reached the end of the stream?
- self.done = False
-
- # The number of unclosed '{' and '['. `flow_level == 0` means block
- # context.
- self.flow_level = 0
-
- # List of processed tokens that are not yet emitted.
- self.tokens = []
-
- # Add the STREAM-START token.
- self.fetch_stream_start()
-
- # Number of tokens that were emitted through the `get_token` method.
- self.tokens_taken = 0
-
- # The current indentation level.
- self.indent = -1
-
- # Past indentation levels.
- self.indents = []
-
- # Variables related to simple keys treatment.
-
- # A simple key is a key that is not denoted by the '?' indicator.
- # Example of simple keys:
- # ---
- # block simple key: value
- # ? not a simple key:
- # : { flow simple key: value }
- # We emit the KEY token before all keys, so when we find a potential
- # simple key, we try to locate the corresponding ':' indicator.
- # Simple keys should be limited to a single line and 1024 characters.
-
- # Can a simple key start at the current position? A simple key may
- # start:
- # - at the beginning of the line, not counting indentation spaces
- # (in block context),
- # - after '{', '[', ',' (in the flow context),
- # - after '?', ':', '-' (in the block context).
- # In the block context, this flag also signifies if a block collection
- # may start at the current position.
- self.allow_simple_key = True
-
- # Keep track of possible simple keys. This is a dictionary. The key
-        # is `flow_level`; there can be no more than one possible simple key
- # for each level. The value is a SimpleKey record:
- # (token_number, required, index, line, column, mark)
- # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
- # '[', or '{' tokens.
- self.possible_simple_keys = {}
-
- # Public methods.
-
- def check_token(self, *choices):
- # Check if the next token is one of the given types.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.tokens[0], choice):
- return True
- return False
-
- def peek_token(self):
-        # Return the next token, but do not delete it from the queue.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- return self.tokens[0]
-
- def get_token(self):
- # Return the next token.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- if self.tokens:
- self.tokens_taken += 1
- return self.tokens.pop(0)
-
- # Private methods.
-
- def need_more_tokens(self):
- if self.done:
- return False
- if not self.tokens:
- return True
- # The current token may be a potential simple key, so we
- # need to look further.
- self.stale_possible_simple_keys()
- if self.next_possible_simple_key() == self.tokens_taken:
- return True
-
- def fetch_more_tokens(self):
-
- # Eat whitespaces and comments until we reach the next token.
- self.scan_to_next_token()
-
- # Remove obsolete possible simple keys.
- self.stale_possible_simple_keys()
-
- # Compare the current indentation and column. It may add some tokens
- # and decrease the current indentation level.
- self.unwind_indent(self.column)
-
- # Peek the next character.
- ch = self.peek()
-
- # Is it the end of stream?
- if ch == '\0':
- return self.fetch_stream_end()
-
- # Is it a directive?
- if ch == '%' and self.check_directive():
- return self.fetch_directive()
-
- # Is it the document start?
- if ch == '-' and self.check_document_start():
- return self.fetch_document_start()
-
- # Is it the document end?
- if ch == '.' and self.check_document_end():
- return self.fetch_document_end()
-
- # TODO: support for BOM within a stream.
- #if ch == '\uFEFF':
- # return self.fetch_bom() <-- issue BOMToken
-
- # Note: the order of the following checks is NOT significant.
-
- # Is it the flow sequence start indicator?
- if ch == '[':
- return self.fetch_flow_sequence_start()
-
- # Is it the flow mapping start indicator?
- if ch == '{':
- return self.fetch_flow_mapping_start()
-
- # Is it the flow sequence end indicator?
- if ch == ']':
- return self.fetch_flow_sequence_end()
-
- # Is it the flow mapping end indicator?
- if ch == '}':
- return self.fetch_flow_mapping_end()
-
- # Is it the flow entry indicator?
- if ch == ',':
- return self.fetch_flow_entry()
-
- # Is it the block entry indicator?
- if ch == '-' and self.check_block_entry():
- return self.fetch_block_entry()
-
- # Is it the key indicator?
- if ch == '?' and self.check_key():
- return self.fetch_key()
-
- # Is it the value indicator?
- if ch == ':' and self.check_value():
- return self.fetch_value()
-
- # Is it an alias?
- if ch == '*':
- return self.fetch_alias()
-
- # Is it an anchor?
- if ch == '&':
- return self.fetch_anchor()
-
- # Is it a tag?
- if ch == '!':
- return self.fetch_tag()
-
- # Is it a literal scalar?
- if ch == '|' and not self.flow_level:
- return self.fetch_literal()
-
- # Is it a folded scalar?
- if ch == '>' and not self.flow_level:
- return self.fetch_folded()
-
- # Is it a single quoted scalar?
- if ch == '\'':
- return self.fetch_single()
-
- # Is it a double quoted scalar?
- if ch == '\"':
- return self.fetch_double()
-
- # It must be a plain scalar then.
- if self.check_plain():
- return self.fetch_plain()
-
- # No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token" % ch,
- self.get_mark())
-
- # Simple keys treatment.
-
- def next_possible_simple_key(self):
- # Return the number of the nearest possible simple key. Actually we
- # don't need to loop through the whole dictionary. We may replace it
- # with the following code:
- # if not self.possible_simple_keys:
- # return None
- # return self.possible_simple_keys[
- # min(self.possible_simple_keys.keys())].token_number
- min_token_number = None
- for level in self.possible_simple_keys:
- key = self.possible_simple_keys[level]
- if min_token_number is None or key.token_number < min_token_number:
- min_token_number = key.token_number
- return min_token_number
-
- def stale_possible_simple_keys(self):
- # Remove entries that are no longer possible simple keys. According to
- # the YAML specification, simple keys
- # - should be limited to a single line,
- # - should be no longer than 1024 characters.
- # Disabling this procedure will allow simple keys of any length and
- # height (may cause problems if indentation is broken though).
- for level in list(self.possible_simple_keys):
- key = self.possible_simple_keys[level]
- if key.line != self.line \
- or self.index-key.index > 1024:
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
-                        "could not find expected ':'", self.get_mark())
- del self.possible_simple_keys[level]
-
- def save_possible_simple_key(self):
- # The next token may start a simple key. We check if it's possible
- # and save its position. This function is called for
- # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
-
- # Check if a simple key is required at the current position.
- required = not self.flow_level and self.indent == self.column
-
- # A simple key is required only if it is the first token in the current
- # line. Therefore it is always allowed.
- assert self.allow_simple_key or not required
-
-        # The next token might be a simple key. Let's save its number and
- # position.
- if self.allow_simple_key:
- self.remove_possible_simple_key()
- token_number = self.tokens_taken+len(self.tokens)
- key = SimpleKey(token_number, required,
- self.index, self.line, self.column, self.get_mark())
- self.possible_simple_keys[self.flow_level] = key
-
- def remove_possible_simple_key(self):
- # Remove the saved possible key position at the current flow level.
- if self.flow_level in self.possible_simple_keys:
- key = self.possible_simple_keys[self.flow_level]
-
- if key.required:
- raise ScannerError("while scanning a simple key", key.mark,
-                        "could not find expected ':'", self.get_mark())
-
- del self.possible_simple_keys[self.flow_level]
-
- # Indentation functions.
-
- def unwind_indent(self, column):
-
- ## In flow context, tokens should respect indentation.
- ## Actually the condition should be `self.indent >= column` according to
- ## the spec. But this condition will prohibit intuitively correct
- ## constructions such as
- ## key : {
- ## }
- #if self.flow_level and self.indent > column:
- # raise ScannerError(None, None,
-        #            "invalid indentation or unclosed '[' or '{'",
- # self.get_mark())
-
- # In the flow context, indentation is ignored. We make the scanner less
-        # restrictive than the specification requires.
- if self.flow_level:
- return
-
- # In block context, we may need to issue the BLOCK-END tokens.
- while self.indent > column:
- mark = self.get_mark()
- self.indent = self.indents.pop()
- self.tokens.append(BlockEndToken(mark, mark))
-
- def add_indent(self, column):
- # Check if we need to increase indentation.
- if self.indent < column:
- self.indents.append(self.indent)
- self.indent = column
- return True
- return False
-
- # Fetchers.
-
- def fetch_stream_start(self):
- # We always add STREAM-START as the first token and STREAM-END as the
- # last token.
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.encoding))
-
-
- def fetch_stream_end(self):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
- self.possible_simple_keys = {}
-
- # Read the token.
- mark = self.get_mark()
-
- # Add STREAM-END.
- self.tokens.append(StreamEndToken(mark, mark))
-
-        # The stream is finished.
- self.done = True
-
- def fetch_directive(self):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Scan and add DIRECTIVE.
- self.tokens.append(self.scan_directive())
-
- def fetch_document_start(self):
- self.fetch_document_indicator(DocumentStartToken)
-
- def fetch_document_end(self):
- self.fetch_document_indicator(DocumentEndToken)
-
- def fetch_document_indicator(self, TokenClass):
-
-        # Set the current indentation to -1.
- self.unwind_indent(-1)
-
- # Reset simple keys. Note that there could not be a block collection
- # after '---'.
- self.remove_possible_simple_key()
- self.allow_simple_key = False
-
- # Add DOCUMENT-START or DOCUMENT-END.
- start_mark = self.get_mark()
- self.forward(3)
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_start(self):
- self.fetch_flow_collection_start(FlowSequenceStartToken)
-
- def fetch_flow_mapping_start(self):
- self.fetch_flow_collection_start(FlowMappingStartToken)
-
- def fetch_flow_collection_start(self, TokenClass):
-
- # '[' and '{' may start a simple key.
- self.save_possible_simple_key()
-
- # Increase the flow level.
- self.flow_level += 1
-
- # Simple keys are allowed after '[' and '{'.
- self.allow_simple_key = True
-
- # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_sequence_end(self):
- self.fetch_flow_collection_end(FlowSequenceEndToken)
-
- def fetch_flow_mapping_end(self):
- self.fetch_flow_collection_end(FlowMappingEndToken)
-
- def fetch_flow_collection_end(self, TokenClass):
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Decrease the flow level.
- self.flow_level -= 1
-
- # No simple keys after ']' or '}'.
- self.allow_simple_key = False
-
- # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(TokenClass(start_mark, end_mark))
-
- def fetch_flow_entry(self):
-
- # Simple keys are allowed after ','.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add FLOW-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(FlowEntryToken(start_mark, end_mark))
-
- def fetch_block_entry(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
- # Are we allowed to start a new entry?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-SEQUENCE-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockSequenceStartToken(mark, mark))
-
- # It's an error for the block entry to occur in the flow context,
- # but we let the parser detect this.
- else:
- pass
-
- # Simple keys are allowed after '-'.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add BLOCK-ENTRY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(BlockEntryToken(start_mark, end_mark))
-
- def fetch_key(self):
-
- # Block context needs additional checks.
- if not self.flow_level:
-
-            # Are we allowed to start a key (not necessarily a simple one)?
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.get_mark())
-
- # We may need to add BLOCK-MAPPING-START.
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after '?' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add KEY.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(KeyToken(start_mark, end_mark))
-
- def fetch_value(self):
-
-        # Do we have a simple key pending at the current flow level?
- if self.flow_level in self.possible_simple_keys:
-
- # Add KEY.
- key = self.possible_simple_keys[self.flow_level]
- del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number-self.tokens_taken,
- KeyToken(key.mark, key.mark))
-
- # If this key starts a new block mapping, we need to add
- # BLOCK-MAPPING-START.
- if not self.flow_level:
- if self.add_indent(key.column):
- self.tokens.insert(key.token_number-self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
-
- # There cannot be two simple keys one after another.
- self.allow_simple_key = False
-
- # It must be a part of a complex key.
- else:
-
- # Block context needs additional checks.
-            # (Do we really need them? They will be caught by the parser
- # anyway.)
- if not self.flow_level:
-
- # We are allowed to start a complex value if and only if
- # we can start a simple key.
- if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.get_mark())
-
- # If this value starts a new block mapping, we need to add
- # BLOCK-MAPPING-START. It will be detected as an error later by
- # the parser.
- if not self.flow_level:
- if self.add_indent(self.column):
- mark = self.get_mark()
- self.tokens.append(BlockMappingStartToken(mark, mark))
-
- # Simple keys are allowed after ':' in the block context.
- self.allow_simple_key = not self.flow_level
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Add VALUE.
- start_mark = self.get_mark()
- self.forward()
- end_mark = self.get_mark()
- self.tokens.append(ValueToken(start_mark, end_mark))
-
- def fetch_alias(self):
-
- # ALIAS could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ALIAS.
- self.allow_simple_key = False
-
- # Scan and add ALIAS.
- self.tokens.append(self.scan_anchor(AliasToken))
-
- def fetch_anchor(self):
-
- # ANCHOR could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after ANCHOR.
- self.allow_simple_key = False
-
- # Scan and add ANCHOR.
- self.tokens.append(self.scan_anchor(AnchorToken))
-
- def fetch_tag(self):
-
- # TAG could start a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after TAG.
- self.allow_simple_key = False
-
- # Scan and add TAG.
- self.tokens.append(self.scan_tag())
-
- def fetch_literal(self):
- self.fetch_block_scalar(style='|')
-
- def fetch_folded(self):
- self.fetch_block_scalar(style='>')
-
- def fetch_block_scalar(self, style):
-
- # A simple key may follow a block scalar.
- self.allow_simple_key = True
-
- # Reset possible simple key on the current level.
- self.remove_possible_simple_key()
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_block_scalar(style))
-
- def fetch_single(self):
- self.fetch_flow_scalar(style='\'')
-
- def fetch_double(self):
- self.fetch_flow_scalar(style='"')
-
- def fetch_flow_scalar(self, style):
-
- # A flow scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after flow scalars.
- self.allow_simple_key = False
-
- # Scan and add SCALAR.
- self.tokens.append(self.scan_flow_scalar(style))
-
- def fetch_plain(self):
-
- # A plain scalar could be a simple key.
- self.save_possible_simple_key()
-
- # No simple keys after plain scalars. But note that `scan_plain` will
- # change this flag if the scan is finished at the beginning of the
- # line.
- self.allow_simple_key = False
-
- # Scan and add SCALAR. May change `allow_simple_key`.
- self.tokens.append(self.scan_plain())
-
- # Checkers.
-
- def check_directive(self):
-
- # DIRECTIVE: ^ '%' ...
- # The '%' indicator is already checked.
- if self.column == 0:
- return True
-
- def check_document_start(self):
-
- # DOCUMENT-START: ^ '---' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == '---' \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_document_end(self):
-
- # DOCUMENT-END: ^ '...' (' '|'\n')
- if self.column == 0:
- if self.prefix(3) == '...' \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return True
-
- def check_block_entry(self):
-
- # BLOCK-ENTRY: '-' (' '|'\n')
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_key(self):
-
- # KEY(flow context): '?'
- if self.flow_level:
- return True
-
- # KEY(block context): '?' (' '|'\n')
- else:
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_value(self):
-
- # VALUE(flow context): ':'
- if self.flow_level:
- return True
-
- # VALUE(block context): ':' (' '|'\n')
- else:
- return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
-
- def check_plain(self):
-
- # A plain scalar may start with any non-space character except:
- # '-', '?', ':', ',', '[', ']', '{', '}',
- # '#', '&', '*', '!', '|', '>', '\'', '\"',
- # '%', '@', '`'.
- #
- # It may also start with
- # '-', '?', ':'
- # if it is followed by a non-space character.
- #
- # Note that we limit the last rule to the block context (except the
- # '-' character) because we want the flow context to be space
- # independent.
- ch = self.peek()
- return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
- or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
- and (ch == '-' or (not self.flow_level and ch in '?:')))
-
- # Scanners.
-
- def scan_to_next_token(self):
- # We ignore spaces, line breaks and comments.
- # If we find a line break in the block context, we set the flag
- # `allow_simple_key` on.
- # The byte order mark is stripped if it's the first character in the
- # stream. We do not yet support BOM inside the stream as the
- # specification requires. Any such mark will be considered as a part
- # of the document.
- #
- # TODO: We need to make tab handling rules more sane. A good rule is
- # Tabs cannot precede tokens
- # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
- # KEY(block), VALUE(block), BLOCK-ENTRY
- # So the checking code is
- # if <TAB>:
- # self.allow_simple_keys = False
- # We also need to add the check for `allow_simple_keys == True` to
- # `unwind_indent` before issuing BLOCK-END.
- # Scanners for block, flow, and plain scalars need to be modified.
-
- if self.index == 0 and self.peek() == '\uFEFF':
- self.forward()
- found = False
- while not found:
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- if self.scan_line_break():
- if not self.flow_level:
- self.allow_simple_key = True
- else:
- found = True
-
- def scan_directive(self):
- # See the specification for details.
- start_mark = self.get_mark()
- self.forward()
- name = self.scan_directive_name(start_mark)
- value = None
- if name == 'YAML':
- value = self.scan_yaml_directive_value(start_mark)
- end_mark = self.get_mark()
- elif name == 'TAG':
- value = self.scan_tag_directive_value(start_mark)
- end_mark = self.get_mark()
- else:
- end_mark = self.get_mark()
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- self.scan_directive_ignored_line(start_mark)
- return DirectiveToken(name, value, start_mark, end_mark)
-
- def scan_directive_name(self, start_mark):
- # See the specification for details.
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- return value
-
- def scan_yaml_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- major = self.scan_yaml_directive_number(start_mark)
- if self.peek() != '.':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or '.', but found %r" % self.peek(),
- self.get_mark())
- self.forward()
- minor = self.scan_yaml_directive_number(start_mark)
- if self.peek() not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r" % self.peek(),
- self.get_mark())
- return (major, minor)
-
- def scan_yaml_directive_number(self, start_mark):
- # See the specification for details.
- ch = self.peek()
- if not ('0' <= ch <= '9'):
- raise ScannerError("while scanning a directive", start_mark,
- "expected a digit, but found %r" % ch, self.get_mark())
- length = 0
- while '0' <= self.peek(length) <= '9':
- length += 1
- value = int(self.prefix(length))
- self.forward(length)
- return value
-
- def scan_tag_directive_value(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- handle = self.scan_tag_directive_handle(start_mark)
- while self.peek() == ' ':
- self.forward()
- prefix = self.scan_tag_directive_prefix(start_mark)
- return (handle, prefix)
-
- def scan_tag_directive_handle(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_handle('directive', start_mark)
- ch = self.peek()
- if ch != ' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- return value
-
- def scan_tag_directive_prefix(self, start_mark):
- # See the specification for details.
- value = self.scan_tag_uri('directive', start_mark)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- return value
-
- def scan_directive_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in '\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % ch, self.get_mark())
- self.scan_line_break()
-
- def scan_anchor(self, TokenClass):
- # The specification does not restrict characters for anchors and
- # aliases. This may lead to problems, for instance, the document:
- # [ *alias, value ]
-        # can be interpreted in two ways, as
- # [ "value" ]
- # and
- # [ *alias , "value" ]
- # Therefore we restrict aliases to numbers and ASCII letters.
- start_mark = self.get_mark()
- indicator = self.peek()
- if indicator == '*':
- name = 'alias'
- else:
- name = 'anchor'
- self.forward()
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if not length:
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- value = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
- raise ScannerError("while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % ch, self.get_mark())
- end_mark = self.get_mark()
- return TokenClass(value, start_mark, end_mark)
-
- def scan_tag(self):
- # See the specification for details.
- start_mark = self.get_mark()
- ch = self.peek(1)
- if ch == '<':
- handle = None
- self.forward(2)
- suffix = self.scan_tag_uri('tag', start_mark)
- if self.peek() != '>':
- raise ScannerError("while parsing a tag", start_mark,
- "expected '>', but found %r" % self.peek(),
- self.get_mark())
- self.forward()
- elif ch in '\0 \t\r\n\x85\u2028\u2029':
- handle = None
- suffix = '!'
- self.forward()
- else:
- length = 1
- use_handle = False
- while ch not in '\0 \r\n\x85\u2028\u2029':
- if ch == '!':
- use_handle = True
- break
- length += 1
- ch = self.peek(length)
- handle = '!'
- if use_handle:
- handle = self.scan_tag_handle('tag', start_mark)
- else:
- handle = '!'
- self.forward()
- suffix = self.scan_tag_uri('tag', start_mark)
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % ch, self.get_mark())
- value = (handle, suffix)
- end_mark = self.get_mark()
- return TagToken(value, start_mark, end_mark)
-
- def scan_block_scalar(self, style):
- # See the specification for details.
-
- if style == '>':
- folded = True
- else:
- folded = False
-
- chunks = []
- start_mark = self.get_mark()
-
- # Scan the header.
- self.forward()
- chomping, increment = self.scan_block_scalar_indicators(start_mark)
- self.scan_block_scalar_ignored_line(start_mark)
-
- # Determine the indentation level and go to the first non-empty line.
- min_indent = self.indent+1
- if min_indent < 1:
- min_indent = 1
- if increment is None:
- breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
- indent = max(min_indent, max_indent)
- else:
- indent = min_indent+increment-1
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = ''
-
- # Scan the inner part of the block scalar.
- while self.column == indent and self.peek() != '\0':
- chunks.extend(breaks)
- leading_non_space = self.peek() not in ' \t'
- length = 0
- while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
- length += 1
- chunks.append(self.prefix(length))
- self.forward(length)
- line_break = self.scan_line_break()
- breaks, end_mark = self.scan_block_scalar_breaks(indent)
- if self.column == indent and self.peek() != '\0':
-
- # Unfortunately, folding rules are ambiguous.
- #
- # This is the folding according to the specification:
-
- if folded and line_break == '\n' \
- and leading_non_space and self.peek() not in ' \t':
- if not breaks:
- chunks.append(' ')
- else:
- chunks.append(line_break)
-
- # This is Clark Evans's interpretation (also in the spec
- # examples):
- #
- #if folded and line_break == '\n':
- # if not breaks:
- # if self.peek() not in ' \t':
- # chunks.append(' ')
- # else:
- # chunks.append(line_break)
- #else:
- # chunks.append(line_break)
- else:
- break
-
- # Chomp the tail.
- if chomping is not False:
- chunks.append(line_break)
- if chomping is True:
- chunks.extend(breaks)
-
- # We are done.
- return ScalarToken(''.join(chunks), False, start_mark, end_mark,
- style)
-
- def scan_block_scalar_indicators(self, start_mark):
- # See the specification for details.
- chomping = None
- increment = None
- ch = self.peek()
- if ch in '+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch in '0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- elif ch in '0123456789':
- increment = int(ch)
- if increment == 0:
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, but found 0",
- self.get_mark())
- self.forward()
- ch = self.peek()
- if ch in '+-':
- if ch == '+':
- chomping = True
- else:
- chomping = False
- self.forward()
- ch = self.peek()
- if ch not in '\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % ch, self.get_mark())
- return chomping, increment
-
- def scan_block_scalar_ignored_line(self, start_mark):
- # See the specification for details.
- while self.peek() == ' ':
- self.forward()
- if self.peek() == '#':
- while self.peek() not in '\0\r\n\x85\u2028\u2029':
- self.forward()
- ch = self.peek()
- if ch not in '\0\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r" % ch,
- self.get_mark())
- self.scan_line_break()
-
- def scan_block_scalar_indentation(self):
- # See the specification for details.
- chunks = []
- max_indent = 0
- end_mark = self.get_mark()
- while self.peek() in ' \r\n\x85\u2028\u2029':
- if self.peek() != ' ':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- else:
- self.forward()
- if self.column > max_indent:
- max_indent = self.column
- return chunks, max_indent, end_mark
-
- def scan_block_scalar_breaks(self, indent):
- # See the specification for details.
- chunks = []
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == ' ':
- self.forward()
- while self.peek() in '\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- end_mark = self.get_mark()
- while self.column < indent and self.peek() == ' ':
- self.forward()
- return chunks, end_mark
-
- def scan_flow_scalar(self, style):
- # See the specification for details.
-        # Note that we relax indentation rules for quoted scalars. Quoted
-        # scalars don't need to adhere to indentation because " and ' clearly
-        # mark the beginning and the end of them. Therefore we are less
-        # restrictive than the specification requires. We only need to check
- # that document separators are not included in scalars.
- if style == '"':
- double = True
- else:
- double = False
- chunks = []
- start_mark = self.get_mark()
- quote = self.peek()
- self.forward()
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- while self.peek() != quote:
- chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
- chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
- self.forward()
- end_mark = self.get_mark()
- return ScalarToken(''.join(chunks), False, start_mark, end_mark,
- style)
-
- ESCAPE_REPLACEMENTS = {
- '0': '\0',
- 'a': '\x07',
- 'b': '\x08',
- 't': '\x09',
- '\t': '\x09',
- 'n': '\x0A',
- 'v': '\x0B',
- 'f': '\x0C',
- 'r': '\x0D',
- 'e': '\x1B',
- ' ': '\x20',
- '\"': '\"',
- '\\': '\\',
- 'N': '\x85',
- '_': '\xA0',
- 'L': '\u2028',
- 'P': '\u2029',
- }
-
- ESCAPE_CODES = {
- 'x': 2,
- 'u': 4,
- 'U': 8,
- }
-
- def scan_flow_scalar_non_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- length = 0
- while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
- length += 1
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- ch = self.peek()
- if not double and ch == '\'' and self.peek(1) == '\'':
- chunks.append('\'')
- self.forward(2)
- elif (double and ch == '\'') or (not double and ch in '\"\\'):
- chunks.append(ch)
- self.forward()
- elif double and ch == '\\':
- self.forward()
- ch = self.peek()
- if ch in self.ESCAPE_REPLACEMENTS:
- chunks.append(self.ESCAPE_REPLACEMENTS[ch])
- self.forward()
- elif ch in self.ESCAPE_CODES:
- length = self.ESCAPE_CODES[ch]
- self.forward()
- for k in range(length):
- if self.peek(k) not in '0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                                "expected escape sequence of %d hexadecimal digits, but found %r" %
- (length, self.peek(k)), self.get_mark())
- code = int(self.prefix(length), 16)
- chunks.append(chr(code))
- self.forward(length)
- elif ch in '\r\n\x85\u2028\u2029':
- self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
- else:
- raise ScannerError("while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % ch, self.get_mark())
- else:
- return chunks
-
- def scan_flow_scalar_spaces(self, double, start_mark):
- # See the specification for details.
- chunks = []
- length = 0
- while self.peek(length) in ' \t':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch == '\0':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.get_mark())
- elif ch in '\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != '\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(' ')
- chunks.extend(breaks)
- else:
- chunks.append(whitespaces)
- return chunks
-
- def scan_flow_scalar_breaks(self, double, start_mark):
- # See the specification for details.
- chunks = []
- while True:
- # Instead of checking indentation, we check for document
- # separators.
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a quoted scalar", start_mark,
- "found unexpected document separator", self.get_mark())
- while self.peek() in ' \t':
- self.forward()
- if self.peek() in '\r\n\x85\u2028\u2029':
- chunks.append(self.scan_line_break())
- else:
- return chunks
-
- def scan_plain(self):
- # See the specification for details.
- # We add an additional restriction for the flow context:
- # plain scalars in the flow context cannot contain ',', ':' and '?'.
- # We also keep track of the `allow_simple_key` flag here.
-        # Indentation rules are relaxed for the flow context.
- chunks = []
- start_mark = self.get_mark()
- end_mark = start_mark
- indent = self.indent+1
- # We allow zero indentation for scalars, but then we need to check for
- # document separators at the beginning of the line.
- #if indent == 0:
- # indent = 1
- spaces = []
- while True:
- length = 0
- if self.peek() == '#':
- break
- while True:
- ch = self.peek(length)
- if ch in '\0 \t\r\n\x85\u2028\u2029' \
- or (not self.flow_level and ch == ':' and
- self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029') \
- or (self.flow_level and ch in ',:?[]{}'):
- break
- length += 1
- # It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == ':'
- and self.peek(length+1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'):
- self.forward(length)
- raise ScannerError("while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.get_mark(),
- "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
- if length == 0:
- break
- self.allow_simple_key = False
- chunks.extend(spaces)
- chunks.append(self.prefix(length))
- self.forward(length)
- end_mark = self.get_mark()
- spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.peek() == '#' \
- or (not self.flow_level and self.column < indent):
- break
- return ScalarToken(''.join(chunks), True, start_mark, end_mark)
-
- def scan_plain_spaces(self, indent, start_mark):
- # See the specification for details.
- # The specification is really confusing about tabs in plain scalars.
- # We just forbid them completely. Do not use tabs in YAML!
- chunks = []
- length = 0
- while self.peek(length) in ' ':
- length += 1
- whitespaces = self.prefix(length)
- self.forward(length)
- ch = self.peek()
- if ch in '\r\n\x85\u2028\u2029':
- line_break = self.scan_line_break()
- self.allow_simple_key = True
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return
- breaks = []
- while self.peek() in ' \r\n\x85\u2028\u2029':
- if self.peek() == ' ':
- self.forward()
- else:
- breaks.append(self.scan_line_break())
- prefix = self.prefix(3)
- if (prefix == '---' or prefix == '...') \
- and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
- return
- if line_break != '\n':
- chunks.append(line_break)
- elif not breaks:
- chunks.append(' ')
- chunks.extend(breaks)
- elif whitespaces:
- chunks.append(whitespaces)
- return chunks
-
- def scan_tag_handle(self, name, start_mark):
- # See the specification for details.
-        # For some strange reason, the specification does not allow '_' in
- # tag handles. I have allowed it anyway.
- ch = self.peek()
- if ch != '!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch, self.get_mark())
- length = 1
- ch = self.peek(length)
- if ch != ' ':
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-_':
- length += 1
- ch = self.peek(length)
- if ch != '!':
- self.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % ch, self.get_mark())
- length += 1
- value = self.prefix(length)
- self.forward(length)
- return value
-
- def scan_tag_uri(self, name, start_mark):
- # See the specification for details.
- # Note: we do not check if URI is well-formed.
- chunks = []
- length = 0
- ch = self.peek(length)
- while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
- or ch in '-;/?:@&=+$,_.!~*\'()[]%':
- if ch == '%':
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- chunks.append(self.scan_uri_escapes(name, start_mark))
- else:
- length += 1
- ch = self.peek(length)
- if length:
- chunks.append(self.prefix(length))
- self.forward(length)
- length = 0
- if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % ch, self.get_mark())
- return ''.join(chunks)
-
- def scan_uri_escapes(self, name, start_mark):
- # See the specification for details.
- codes = []
- mark = self.get_mark()
- while self.peek() == '%':
- self.forward()
- for k in range(2):
- if self.peek(k) not in '0123456789ABCDEFabcdef':
- raise ScannerError("while scanning a %s" % name, start_mark,
-                        "expected URI escape sequence of 2 hexadecimal digits, but found %r"
- % self.peek(k), self.get_mark())
- codes.append(int(self.prefix(2), 16))
- self.forward(2)
- try:
- value = bytes(codes).decode('utf-8')
- except UnicodeDecodeError as exc:
- raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
- return value
-
- def scan_line_break(self):
- # Transforms:
- # '\r\n' : '\n'
- # '\r' : '\n'
- # '\n' : '\n'
- # '\x85' : '\n'
- # '\u2028' : '\u2028'
- # '\u2029 : '\u2029'
- # default : ''
- ch = self.peek()
- if ch in '\r\n\x85':
- if self.prefix(2) == '\r\n':
- self.forward(2)
- else:
- self.forward()
- return '\n'
- elif ch in '\u2028\u2029':
- self.forward()
- return ch
- return ''
-
-#try:
-# import psyco
-# psyco.bind(Scanner)
-#except ImportError:
-# pass
-
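As a reference point for the scanner removed above, the same token stream can be observed through the public `yaml.scan()` helper of the upstream package (an assumption; the bundled copy exposes the identical Scanner class):

```python
# Minimal sketch, assuming the upstream `yaml` package: print the token
# types the Scanner emits for a small document.
import yaml

for token in yaml.scan("key: [1, 2]"):
    print(type(token).__name__)
# StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken,
# ValueToken, FlowSequenceStartToken, ScalarToken, FlowEntryToken,
# ScalarToken, FlowSequenceEndToken, BlockEndToken, StreamEndToken
```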
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
deleted file mode 100644
index 1ba2f7f9d..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/serializer.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-__all__ = ['Serializer', 'SerializerError']
-
-from .error import YAMLError
-from .events import *
-from .nodes import *
-
-class SerializerError(YAMLError):
- pass
-
-class Serializer:
-
- ANCHOR_TEMPLATE = 'id%03d'
-
- def __init__(self, encoding=None,
- explicit_start=None, explicit_end=None, version=None, tags=None):
- self.use_encoding = encoding
- self.use_explicit_start = explicit_start
- self.use_explicit_end = explicit_end
- self.use_version = version
- self.use_tags = tags
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
- self.closed = None
-
- def open(self):
- if self.closed is None:
- self.emit(StreamStartEvent(encoding=self.use_encoding))
- self.closed = False
- elif self.closed:
- raise SerializerError("serializer is closed")
- else:
- raise SerializerError("serializer is already opened")
-
- def close(self):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif not self.closed:
- self.emit(StreamEndEvent())
- self.closed = True
-
- #def __del__(self):
- # self.close()
-
- def serialize(self, node):
- if self.closed is None:
- raise SerializerError("serializer is not opened")
- elif self.closed:
- raise SerializerError("serializer is closed")
- self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version, tags=self.use_tags))
- self.anchor_node(node)
- self.serialize_node(node, None, None)
- self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
- self.serialized_nodes = {}
- self.anchors = {}
- self.last_anchor_id = 0
-
- def anchor_node(self, node):
- if node in self.anchors:
- if self.anchors[node] is None:
- self.anchors[node] = self.generate_anchor(node)
- else:
- self.anchors[node] = None
- if isinstance(node, SequenceNode):
- for item in node.value:
- self.anchor_node(item)
- elif isinstance(node, MappingNode):
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
-
- def generate_anchor(self, node):
- self.last_anchor_id += 1
- return self.ANCHOR_TEMPLATE % self.last_anchor_id
-
- def serialize_node(self, node, parent, index):
- alias = self.anchors[node]
- if node in self.serialized_nodes:
- self.emit(AliasEvent(alias))
- else:
- self.serialized_nodes[node] = True
- self.descend_resolver(parent, index)
- if isinstance(node, ScalarNode):
- detected_tag = self.resolve(ScalarNode, node.value, (True, False))
- default_tag = self.resolve(ScalarNode, node.value, (False, True))
- implicit = (node.tag == detected_tag), (node.tag == default_tag)
- self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style))
- elif isinstance(node, SequenceNode):
- implicit = (node.tag
- == self.resolve(SequenceNode, node.value, True))
- self.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- index = 0
- for item in node.value:
- self.serialize_node(item, node, index)
- index += 1
- self.emit(SequenceEndEvent())
- elif isinstance(node, MappingNode):
- implicit = (node.tag
- == self.resolve(MappingNode, node.value, True))
- self.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style))
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
- self.emit(MappingEndEvent())
- self.ascend_resolver()
-
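For reference, the Serializer removed above assigns anchors to nodes referenced more than once and re-emits later references as aliases; the upstream `yaml.compose()`/`yaml.serialize()` pair (an assumption, mirroring this bundled code) demonstrates the round trip:

```python
# Minimal sketch, assuming the upstream `yaml` package: a node referenced
# twice gets an anchor on serialization and an alias on the second use.
import yaml

node = yaml.compose("shared: &a {x: 1}\nother: *a\n")
print(yaml.serialize(node))
# roughly:
#   shared: &id001 {x: 1}
#   other: *id001
```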
diff --git a/src/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py b/src/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
deleted file mode 100644
index c5c4fb116..000000000
--- a/src/collectors/python.d.plugin/python_modules/pyyaml3/tokens.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# SPDX-License-Identifier: MIT
-
-class Token(object):
- def __init__(self, start_mark, end_mark):
- self.start_mark = start_mark
- self.end_mark = end_mark
- def __repr__(self):
- attributes = [key for key in self.__dict__
- if not key.endswith('_mark')]
- attributes.sort()
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
- return '%s(%s)' % (self.__class__.__name__, arguments)
-
-#class BOMToken(Token):
-# id = '<byte order mark>'
-
-class DirectiveToken(Token):
- id = '<directive>'
- def __init__(self, name, value, start_mark, end_mark):
- self.name = name
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class DocumentStartToken(Token):
- id = '<document start>'
-
-class DocumentEndToken(Token):
- id = '<document end>'
-
-class StreamStartToken(Token):
- id = '<stream start>'
- def __init__(self, start_mark=None, end_mark=None,
- encoding=None):
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.encoding = encoding
-
-class StreamEndToken(Token):
- id = '<stream end>'
-
-class BlockSequenceStartToken(Token):
- id = '<block sequence start>'
-
-class BlockMappingStartToken(Token):
- id = '<block mapping start>'
-
-class BlockEndToken(Token):
- id = '<block end>'
-
-class FlowSequenceStartToken(Token):
- id = '['
-
-class FlowMappingStartToken(Token):
- id = '{'
-
-class FlowSequenceEndToken(Token):
- id = ']'
-
-class FlowMappingEndToken(Token):
- id = '}'
-
-class KeyToken(Token):
- id = '?'
-
-class ValueToken(Token):
- id = ':'
-
-class BlockEntryToken(Token):
- id = '-'
-
-class FlowEntryToken(Token):
- id = ','
-
-class AliasToken(Token):
- id = '<alias>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class AnchorToken(Token):
- id = '<anchor>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class TagToken(Token):
- id = '<tag>'
- def __init__(self, value, start_mark, end_mark):
- self.value = value
- self.start_mark = start_mark
- self.end_mark = end_mark
-
-class ScalarToken(Token):
- id = '<scalar>'
- def __init__(self, value, plain, start_mark, end_mark, style=None):
- self.value = value
- self.plain = plain
- self.start_mark = start_mark
- self.end_mark = end_mark
- self.style = style
-
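The token classes removed above are plain data carriers; their shared `__repr__` lists every attribute except the source marks, as in this minimal sketch using the classes defined above:

```python
# Minimal sketch using the ScalarToken class above: marks are omitted from
# the repr, the remaining attributes are sorted by name.
t = ScalarToken('hello', True, None, None)
print(t)   # ScalarToken(plain=True, style=None, value='hello')
```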
diff --git a/src/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/src/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
deleted file mode 100644
index ec21779a0..000000000
--- a/src/collectors/python.d.plugin/python_modules/third_party/boinc_client.py
+++ /dev/null
@@ -1,515 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# client.py - Somewhat higher-level GUI_RPC API for BOINC core client
-#
-# Copyright (C) 2013 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com>
-# Copyright (C) 2017 Austin S. Hemmelgarn
-#
-# SPDX-License-Identifier: GPL-3.0
-
-# Based on client/boinc_cmd.cpp
-
-import hashlib
-import socket
-import sys
-import time
-from functools import total_ordering
-from xml.etree import ElementTree
-
-GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg"
-
-GUI_RPC_HOSTNAME = None # localhost
-GUI_RPC_PORT = 31416
-GUI_RPC_TIMEOUT = 1
-
-class Rpc(object):
- ''' Class to perform GUI RPC calls to a BOINC core client.
- Usage in a context manager ('with' block) is recommended to ensure
- disconnect() is called. Using the same instance for all calls is also
- recommended so it reuses the same socket connection
- '''
- def __init__(self, hostname="", port=0, timeout=0, text_output=False):
- self.hostname = hostname
- self.port = port
- self.timeout = timeout
- self.sock = None
- self.text_output = text_output
-
- @property
- def sockargs(self):
- return (self.hostname, self.port, self.timeout)
-
- def __enter__(self): self.connect(*self.sockargs); return self
- def __exit__(self, *args): self.disconnect()
-
- def connect(self, hostname="", port=0, timeout=0):
- ''' Connect to (hostname, port) with timeout in seconds.
- Hostname defaults to None (localhost), and port to 31416
- Calling multiple times will disconnect previous connection (if any),
- and (re-)connect to host.
- '''
- if self.sock:
- self.disconnect()
-
- self.hostname = hostname or GUI_RPC_HOSTNAME
- self.port = port or GUI_RPC_PORT
- self.timeout = timeout or GUI_RPC_TIMEOUT
-
- self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2])
-
- def disconnect(self):
- ''' Disconnect from host. Calling multiple times is OK (idempotent)
- '''
- if self.sock:
- self.sock.close()
- self.sock = None
-
- def call(self, request, text_output=None):
- ''' Do an RPC call. Pack and send the XML request and return the
-            unpacked reply. request can be either plain XML text or an
- xml.etree.ElementTree.Element object. Return ElementTree.Element
- or XML text according to text_output flag.
- Will auto-connect if not connected.
- '''
- if text_output is None:
- text_output = self.text_output
-
- if not self.sock:
- self.connect(*self.sockargs)
-
- if not isinstance(request, ElementTree.Element):
- request = ElementTree.fromstring(request)
-
- # pack request
- end = '\003'
- if sys.version_info[0] < 3:
- req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end)
- else:
- req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode()
-
- try:
- self.sock.sendall(req)
- except (socket.error, socket.herror, socket.gaierror, socket.timeout):
- raise
-
- req = ""
- while True:
- try:
- buf = self.sock.recv(8192)
- if not buf:
- raise socket.error("No data from socket")
- if sys.version_info[0] >= 3:
- buf = buf.decode()
- except socket.error:
- raise
- n = buf.find(end)
-            if n != -1: break
- req += buf
- req += buf[:n]
-
-        # unpack reply (remove root tag, i.e. first and last lines)
- req = '\n'.join(req.strip().rsplit('\n')[1:-1])
-
- if text_output:
- return req
- else:
- return ElementTree.fromstring(req)
-
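The Rpc helper above is intended for use as a context manager, per its docstring; a minimal sketch (the request name is an illustrative unauthenticated BOINC GUI RPC, not taken from this module):

```python
# Minimal sketch using the Rpc class above; defaults connect to
# localhost:31416 and return the reply as an ElementTree.Element.
from xml.etree import ElementTree

with Rpc(timeout=2) as rpc:
    reply = rpc.call('<exchange_versions/>')   # example request
    print(ElementTree.tostring(reply))
```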
-def setattrs_from_xml(obj, xml, attrfuncdict={}):
- ''' Helper to set values for attributes of a class instance by mapping
-        matching tags from an XML file.
- attrfuncdict is a dict of functions to customize value data type of
- each attribute. It falls back to simple int/float/bool/str detection
- based on values defined in __init__(). This would not be needed if
-        Boinc used a standard RPC protocol, which includes data types in XML.
- '''
- if not isinstance(xml, ElementTree.Element):
- xml = ElementTree.fromstring(xml)
- for e in list(xml):
- if hasattr(obj, e.tag):
- attr = getattr(obj, e.tag)
- attrfunc = attrfuncdict.get(e.tag, None)
- if attrfunc is None:
- if isinstance(attr, bool): attrfunc = parse_bool
- elif isinstance(attr, int): attrfunc = parse_int
- elif isinstance(attr, float): attrfunc = parse_float
- elif isinstance(attr, str): attrfunc = parse_str
- elif isinstance(attr, list): attrfunc = parse_list
- else: attrfunc = lambda x: x
- setattr(obj, e.tag, attrfunc(e))
- else:
- pass
- #print "class missing attribute '%s': %r" % (e.tag, obj)
- return obj
-
-
-def parse_bool(e):
- ''' Helper to convert ElementTree.Element.text to boolean.
- Treat '<foo/>' (and '<foo>[[:blank:]]</foo>') as True
- Treat '0' and 'false' as False
- '''
- if e.text is None:
- return True
- else:
- return bool(e.text) and not e.text.strip().lower() in ('0', 'false')
-
-
-def parse_int(e):
- ''' Helper to convert ElementTree.Element.text to integer.
- Treat '<foo/>' (and '<foo></foo>') as 0
- '''
- # int(float()) allows casting to int a value expressed as float in XML
- return 0 if e.text is None else int(float(e.text.strip()))
-
-
-def parse_float(e):
- ''' Helper to convert ElementTree.Element.text to float. '''
- return 0.0 if e.text is None else float(e.text.strip())
-
-
-def parse_str(e):
- ''' Helper to convert ElementTree.Element.text to string. '''
- return "" if e.text is None else e.text.strip()
-
-
-def parse_list(e):
- ''' Helper to convert ElementTree.Element to list. For now, simply return
- the list of root element's children
- '''
- return list(e)
-
-
-class Enum(object):
- UNKNOWN = -1 # Not in original API
-
- @classmethod
- def name(cls, value):
- ''' Quick-and-dirty fallback for getting the "name" of an enum item '''
-
- # value as string, if it matches an enum attribute.
- # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE)
- if hasattr(cls, str(value)):
- return cls.name(getattr(cls, value, None))
-
- # value not handled in subclass name()
- for k, v in cls.__dict__.items():
- if v == value:
- return k.lower().replace('_', ' ')
-
- # value not found
- return cls.name(Enum.UNKNOWN)
-
-
-class CpuSched(Enum):
- ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state
- "SCHEDULED" is synonymous with "executing" except when CPU throttling
- is in use.
- '''
- UNINITIALIZED = 0
- PREEMPTED = 1
- SCHEDULED = 2
-
-
-class ResultState(Enum):
- ''' Values of RESULT::state in client.
- THESE MUST BE IN NUMERICAL ORDER
- (because of the > comparison in RESULT::computing_done())
- see html/inc/common_defs.inc
- '''
- NEW = 0
- #// New result
- FILES_DOWNLOADING = 1
- #// Input files for result (WU, app version) are being downloaded
- FILES_DOWNLOADED = 2
- #// Files are downloaded, result can be (or is being) computed
- COMPUTE_ERROR = 3
- #// computation failed; no file upload
- FILES_UPLOADING = 4
- #// Output files for result are being uploaded
- FILES_UPLOADED = 5
- #// Files are uploaded, notify scheduling server at some point
- ABORTED = 6
- #// result was aborted
- UPLOAD_FAILED = 7
- #// some output file permanent failure
-
-
-class Process(Enum):
- ''' values of ACTIVE_TASK::task_state '''
- UNINITIALIZED = 0
- #// process doesn't exist yet
- EXECUTING = 1
- #// process is running, as far as we know
- SUSPENDED = 9
- #// we've sent it a "suspend" message
- ABORT_PENDING = 5
- #// process exceeded limits; send "abort" message, waiting to exit
- QUIT_PENDING = 8
- #// we've sent it a "quit" message, waiting to exit
- COPY_PENDING = 10
- #// waiting for async file copies to finish
-
-
-class _Struct(object):
- ''' base helper class with common methods for all classes derived from
- BOINC's C++ structs
- '''
- @classmethod
- def parse(cls, xml):
- return setattrs_from_xml(cls(), xml)
-
- def __str__(self, indent=0):
- buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__)
- for attr in self.__dict__:
- value = getattr(self, attr)
- if isinstance(value, list):
- buf += '{0}\t{1} [\n'.format('\t' * indent, attr)
- for v in value: buf += '\t\t{0}\t\t,\n'.format(v)
- buf += '\t]\n'
- else:
- buf += '{0}\t{1}\t{2}\n'.format('\t' * indent,
- attr,
- value.__str__(indent+2)
- if isinstance(value, _Struct)
- else repr(value))
- return buf
-
-
-@total_ordering
-class VersionInfo(_Struct):
- def __init__(self, major=0, minor=0, release=0):
- self.major = major
- self.minor = minor
- self.release = release
-
- @property
- def _tuple(self):
- return (self.major, self.minor, self.release)
-
- def __eq__(self, other):
- return isinstance(other, self.__class__) and self._tuple == other._tuple
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __gt__(self, other):
- if not isinstance(other, self.__class__):
- return NotImplemented
- return self._tuple > other._tuple
-
- def __str__(self):
- return "{0}.{1}.{2}".format(self.major, self.minor, self.release)
-
- def __repr__(self):
- return "{0}{1}".format(self.__class__.__name__, self._tuple)
-
-
-class Result(_Struct):
- ''' Also called "task" in some contexts '''
- def __init__(self):
- # Names and values follow lib/gui_rpc_client.h @ RESULT
- # Order too, except when grouping contradicts client/result.cpp
- # RESULT::write_gui(), then XML order is used.
-
- self.name = ""
- self.wu_name = ""
- self.version_num = 0
- #// identifies the app used
- self.plan_class = ""
- self.project_url = "" # from PROJECT.master_url
- self.report_deadline = 0.0 # seconds since epoch
- self.received_time = 0.0 # seconds since epoch
- #// when we got this from server
- self.ready_to_report = False
- #// we're ready to report this result to the server;
- #// either computation is done and all the files have been uploaded
- #// or there was an error
- self.got_server_ack = False
- #// we've received the ack for this result from the server
- self.final_cpu_time = 0.0
- self.final_elapsed_time = 0.0
- self.state = ResultState.NEW
- self.estimated_cpu_time_remaining = 0.0
- #// actually, estimated elapsed time remaining
- self.exit_status = 0
- #// return value from the application
- self.suspended_via_gui = False
- self.project_suspended_via_gui = False
- self.edf_scheduled = False
- #// temporary used to tell GUI that this result is deadline-scheduled
- self.coproc_missing = False
- #// a coproc needed by this job is missing
- #// (e.g. because user removed their GPU board).
- self.scheduler_wait = False
- self.scheduler_wait_reason = ""
- self.network_wait = False
- self.resources = ""
- #// textual description of resources used
-
- #// the following defined if active
- # XML is generated in client/app.cpp ACTIVE_TASK::write_gui()
- self.active_task = False
- self.active_task_state = Process.UNINITIALIZED
- self.app_version_num = 0
- self.slot = -1
- self.pid = 0
- self.scheduler_state = CpuSched.UNINITIALIZED
- self.checkpoint_cpu_time = 0.0
- self.current_cpu_time = 0.0
- self.fraction_done = 0.0
- self.elapsed_time = 0.0
- self.swap_size = 0
- self.working_set_size_smoothed = 0.0
- self.too_large = False
- self.needs_shmem = False
- self.graphics_exec_path = ""
- self.web_graphics_url = ""
- self.remote_desktop_addr = ""
- self.slot_path = ""
- #// only present if graphics_exec_path is
-
- # The following are not in original API, but are present in RPC XML reply
- self.completed_time = 0.0
- #// time when ready_to_report was set
- self.report_immediately = False
- self.working_set_size = 0
- self.page_fault_rate = 0.0
- #// derived by higher-level code
-
- # The following are in API, but are NEVER in RPC XML reply. Go figure
- self.signal = 0
-
- self.app = None # APP*
- self.wup = None # WORKUNIT*
- self.project = None # PROJECT*
- self.avp = None # APP_VERSION*
-
- @classmethod
- def parse(cls, xml):
- if not isinstance(xml, ElementTree.Element):
- xml = ElementTree.fromstring(xml)
-
- # parse main XML
- result = super(Result, cls).parse(xml)
-
- # parse '<active_task>' children
- active_task = xml.find('active_task')
- if active_task is None:
- result.active_task = False # already the default after __init__()
- else:
- result.active_task = True # already the default after main parse
- result = setattrs_from_xml(result, active_task)
-
- #// if CPU time is nonzero but elapsed time is zero,
- #// we must be talking to an old client.
- #// Set elapsed = CPU
- #// (easier to deal with this here than in the manager)
- if result.current_cpu_time != 0 and result.elapsed_time == 0:
- result.elapsed_time = result.current_cpu_time
-
- if result.final_cpu_time != 0 and result.final_elapsed_time == 0:
- result.final_elapsed_time = result.final_cpu_time
-
- return result
-
- def __str__(self):
- buf = '{0}:\n'.format(self.__class__.__name__)
- for attr in self.__dict__:
- value = getattr(self, attr)
- if attr in ['received_time', 'report_deadline']:
- value = time.ctime(value)
- buf += '\t{0}\t{1}\n'.format(attr, value)
- return buf
-
-
-class BoincClient(object):
-
- def __init__(self, host="", port=0, passwd=None):
- self.hostname = host
- self.port = port
- self.passwd = passwd
- self.rpc = Rpc(text_output=False)
- self.version = None
- self.authorized = False
-
- # Informative, not authoritative. Records status of *last* RPC call,
- # but does not infer success about the *next* one.
- # Thus, it should be read *after* an RPC call, not prior to one
- self.connected = False
-
- def __enter__(self): self.connect(); return self
- def __exit__(self, *args): self.disconnect()
-
- def connect(self):
- try:
- self.rpc.connect(self.hostname, self.port)
- self.connected = True
- except socket.error:
- self.connected = False
- return
- self.authorized = self.authorize(self.passwd)
- self.version = self.exchange_versions()
-
- def disconnect(self):
- self.rpc.disconnect()
-
- def authorize(self, password):
- ''' Request authorization. If password is None and we are connecting
- to localhost, try to read password from the local config file
- GUI_RPC_PASSWD_FILE. If file can't be read (not found or no
- permission to read), try to authorize with a blank password.
- If authorization is requested and fails, all subsequent calls
- will be refused with socket.error 'Connection reset by peer' (104).
- Since most local calls do not require authorization, do not attempt
- it if you're not sure about the password.
- '''
- if password is None and not self.hostname:
- password = read_gui_rpc_password() or ""
- nonce = self.rpc.call('<auth1/>').text
- authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower()
- reply = self.rpc.call('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(authhash))
-
- if reply.tag == 'authorized':
- return True
- else:
- return False
-
- def exchange_versions(self):
- ''' Return VersionInfo instance with core client version info '''
- return VersionInfo.parse(self.rpc.call('<exchange_versions/>'))
-
- def get_tasks(self):
- ''' Same as get_results(active_only=False) '''
- return self.get_results(False)
-
- def get_results(self, active_only=False):
- ''' Get a list of results.
- Those that are in progress will have information such as CPU time
- and fraction done. Each result includes a name;
- Use CC_STATE::lookup_result() to find this result in the current static state;
- if it's not there, call get_state() again.
- '''
- reply = self.rpc.call("<get_results><active_only>{0}</active_only></get_results>".format(1 if active_only else 0))
- if not reply.tag == 'results':
- return []
-
- results = []
- for item in list(reply):
- results.append(Result.parse(item))
-
- return results
-
-
-def read_gui_rpc_password():
- ''' Read password string from GUI_RPC_PASSWD_FILE file, trim the trailing
- newline (if any), and return it
- '''
- try:
- with open(GUI_RPC_PASSWD_FILE, 'r') as f:
- buf = f.read()
- if buf.endswith('\n'): return buf[:-1] # trim trailing newline
- else: return buf
- except IOError:
- # Permission denied or File not found.
- pass
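
A minimal usage sketch of the vendored BoincClient removed above (illustrative only:
host, port and password are placeholders, and the import path assumes the module lived
under python_modules/third_party/):

    from third_party.boinc_client import BoincClient

    # The context manager calls connect() on entry and disconnect() on exit;
    # connect() also authorizes and exchanges versions once the socket is open.
    with BoincClient(host="127.0.0.1", port=31416, passwd=None) as client:
        if client.connected:
            for task in client.get_tasks():
                # state, active_task and fraction_done come from the parsed RPC reply.
                print(task.name, task.state, task.active_task, task.fraction_done)
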
diff --git a/src/collectors/python.d.plugin/python_modules/third_party/mcrcon.py b/src/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
deleted file mode 100644
index a65a304b6..000000000
--- a/src/collectors/python.d.plugin/python_modules/third_party/mcrcon.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Minecraft Remote Console module.
-#
-# Copyright (C) 2015 Barnaby Gale
-#
-# SPDX-License-Identifier: MIT
-
-import socket
-import select
-import struct
-import time
-
-
-class MCRconException(Exception):
- pass
-
-
-class MCRcon(object):
- socket = None
-
- def connect(self, host, port, password):
- if self.socket is not None:
- raise MCRconException("Already connected")
- self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- self.socket.settimeout(0.9)
- self.socket.connect((host, port))
- self.send(3, password)
-
- def disconnect(self):
- if self.socket is None:
- raise MCRconException("Already disconnected")
- self.socket.close()
- self.socket = None
-
- def read(self, length):
- data = b""
- while len(data) < length:
- data += self.socket.recv(length - len(data))
- return data
-
- def send(self, out_type, out_data):
- if self.socket is None:
- raise MCRconException("Must connect before sending data")
-
- # Send a request packet
- out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
- out_length = struct.pack('<i', len(out_payload))
- self.socket.send(out_length + out_payload)
-
- # Read response packets
- in_data = ""
- while True:
- # Read a packet
- in_length, = struct.unpack('<i', self.read(4))
- in_payload = self.read(in_length)
- in_id, in_type = struct.unpack('<ii', in_payload[:8])  # request id and packet type
- in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]
-
- # Sanity checks
- if in_padding != b'\x00\x00':
- raise MCRconException("Incorrect padding")
- if in_id == -1:
- raise MCRconException("Login failed")
-
- # Record the response
- in_data += in_data_partial.decode('utf8')
-
- # If there's nothing more to receive, return the response
- if len(select.select([self.socket], [], [], 0)[0]) == 0:
- return in_data
-
- def command(self, command):
- result = self.send(2, command)
- time.sleep(0.003) # MC-72390 workaround
- return result
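
A minimal usage sketch of the MCRcon class removed above (host, port and password are
placeholders; the import path assumes the module's location under python_modules/third_party/):

    from third_party.mcrcon import MCRcon, MCRconException

    rcon = MCRcon()
    try:
        rcon.connect("127.0.0.1", 25575, "secret")  # 25575 is the default RCON port
        print(rcon.command("list"))                 # ask the server for online players
    except MCRconException as err:
        print("RCON error:", err)
    finally:
        if rcon.socket is not None:
            rcon.disconnect()
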
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/__init__.py
deleted file mode 100644
index 3add84816..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/__init__.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-urllib3 - Thread-safe connection pooling and re-using.
-"""
-
-from __future__ import absolute_import
-import warnings
-
-from .connectionpool import (
- HTTPConnectionPool,
- HTTPSConnectionPool,
- connection_from_url
-)
-
-from . import exceptions
-from .filepost import encode_multipart_formdata
-from .poolmanager import PoolManager, ProxyManager, proxy_from_url
-from .response import HTTPResponse
-from .util.request import make_headers
-from .util.url import get_host
-from .util.timeout import Timeout
-from .util.retry import Retry
-
-
-# Set default logging handler to avoid "No handler found" warnings.
-import logging
-try: # Python 2.7+
- from logging import NullHandler
-except ImportError:
- class NullHandler(logging.Handler):
- def emit(self, record):
- pass
-
-__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
-__license__ = 'MIT'
-__version__ = '1.21.1'
-
-__all__ = (
- 'HTTPConnectionPool',
- 'HTTPSConnectionPool',
- 'PoolManager',
- 'ProxyManager',
- 'HTTPResponse',
- 'Retry',
- 'Timeout',
- 'add_stderr_logger',
- 'connection_from_url',
- 'disable_warnings',
- 'encode_multipart_formdata',
- 'get_host',
- 'make_headers',
- 'proxy_from_url',
-)
-
-logging.getLogger(__name__).addHandler(NullHandler())
-
-
-def add_stderr_logger(level=logging.DEBUG):
- """
- Helper for quickly adding a StreamHandler to the logger. Useful for
- debugging.
-
- Returns the handler after adding it.
- """
- # This method needs to be in this __init__.py to get the __name__ correct
- # even if urllib3 is vendored within another package.
- logger = logging.getLogger(__name__)
- handler = logging.StreamHandler()
- handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
- logger.addHandler(handler)
- logger.setLevel(level)
- logger.debug('Added a stderr logging handler to logger: %s', __name__)
- return handler
-
-
-# ... Clean up.
-del NullHandler
-
-
-# All warning filters *must* be appended unless you're really certain that they
-# shouldn't be: otherwise, it's very hard for users to use most Python
-# mechanisms to silence them.
-# SecurityWarning's always go off by default.
-warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
-# SubjectAltNameWarning's should go off once per host
-warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
-# InsecurePlatformWarning's don't vary between requests, so we keep it default.
-warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
- append=True)
-# SNIMissingWarnings should go off only once.
-warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
-
-
-def disable_warnings(category=exceptions.HTTPWarning):
- """
- Helper for quickly disabling all urllib3 warnings.
- """
- warnings.simplefilter('ignore', category)
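
The vendored package mirrored upstream urllib3 1.21.1, so the names exported above were
used in the usual way; a minimal request sketch (the URL is a placeholder):

    import urllib3

    http = urllib3.PoolManager()
    response = http.request(
        "GET",
        "http://127.0.0.1:19999/api/v1/info",
        timeout=urllib3.Timeout(connect=2.0, read=2.0),
        retries=urllib3.Retry(total=1),
    )
    print(response.status, len(response.data))
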
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/_collections.py b/src/collectors/python.d.plugin/python_modules/urllib3/_collections.py
deleted file mode 100644
index 2a6b3ec70..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/_collections.py
+++ /dev/null
@@ -1,320 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-
-try:
- from collections import Mapping, MutableMapping
-except ImportError:
- from collections.abc import Mapping, MutableMapping
-
-try:
- from threading import RLock
-except ImportError: # Platform-specific: No threads available
- class RLock:
- def __enter__(self):
- pass
-
- def __exit__(self, exc_type, exc_value, traceback):
- pass
-
-
-try: # Python 2.7+
- from collections import OrderedDict
-except ImportError:
- from .packages.ordered_dict import OrderedDict
-from .packages.six import iterkeys, itervalues, PY3
-
-
-__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict']
-
-
-_Null = object()
-
-
-class RecentlyUsedContainer(MutableMapping):
- """
- Provides a thread-safe dict-like container which maintains up to
- ``maxsize`` keys while throwing away the least-recently-used keys beyond
- ``maxsize``.
-
- :param maxsize:
- Maximum number of recent elements to retain.
-
- :param dispose_func:
- Every time an item is evicted from the container,
- ``dispose_func(value)`` is called.
- """
-
- ContainerCls = OrderedDict
-
- def __init__(self, maxsize=10, dispose_func=None):
- self._maxsize = maxsize
- self.dispose_func = dispose_func
-
- self._container = self.ContainerCls()
- self.lock = RLock()
-
- def __getitem__(self, key):
- # Re-insert the item, moving it to the end of the eviction line.
- with self.lock:
- item = self._container.pop(key)
- self._container[key] = item
- return item
-
- def __setitem__(self, key, value):
- evicted_value = _Null
- with self.lock:
- # Possibly evict the existing value of 'key'
- evicted_value = self._container.get(key, _Null)
- self._container[key] = value
-
- # If we didn't evict an existing value, we might have to evict the
- # least recently used item from the beginning of the container.
- if len(self._container) > self._maxsize:
- _key, evicted_value = self._container.popitem(last=False)
-
- if self.dispose_func and evicted_value is not _Null:
- self.dispose_func(evicted_value)
-
- def __delitem__(self, key):
- with self.lock:
- value = self._container.pop(key)
-
- if self.dispose_func:
- self.dispose_func(value)
-
- def __len__(self):
- with self.lock:
- return len(self._container)
-
- def __iter__(self):
- raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
-
- def clear(self):
- with self.lock:
- # Copy pointers to all values, then wipe the mapping
- values = list(itervalues(self._container))
- self._container.clear()
-
- if self.dispose_func:
- for value in values:
- self.dispose_func(value)
-
- def keys(self):
- with self.lock:
- return list(iterkeys(self._container))
-
-
-class HTTPHeaderDict(MutableMapping):
- """
- :param headers:
- An iterable of field-value pairs. Must not contain multiple field names
- when compared case-insensitively.
-
- :param kwargs:
- Additional field-value pairs to pass in to ``dict.update``.
-
- A ``dict`` like container for storing HTTP Headers.
-
- Field names are stored and compared case-insensitively in compliance with
- RFC 7230. Iteration provides the first case-sensitive key seen for each
- case-insensitive pair.
-
- Using ``__setitem__`` syntax overwrites fields that compare equal
- case-insensitively in order to maintain ``dict``'s api. For fields that
- compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
- in a loop.
-
- If multiple fields that are equal case-insensitively are passed to the
- constructor or ``.update``, the behavior is undefined and some will be
- lost.
-
- >>> headers = HTTPHeaderDict()
- >>> headers.add('Set-Cookie', 'foo=bar')
- >>> headers.add('set-cookie', 'baz=quxx')
- >>> headers['content-length'] = '7'
- >>> headers['SET-cookie']
- 'foo=bar, baz=quxx'
- >>> headers['Content-Length']
- '7'
- """
-
- def __init__(self, headers=None, **kwargs):
- super(HTTPHeaderDict, self).__init__()
- self._container = OrderedDict()
- if headers is not None:
- if isinstance(headers, HTTPHeaderDict):
- self._copy_from(headers)
- else:
- self.extend(headers)
- if kwargs:
- self.extend(kwargs)
-
- def __setitem__(self, key, val):
- self._container[key.lower()] = [key, val]
- return self._container[key.lower()]
-
- def __getitem__(self, key):
- val = self._container[key.lower()]
- return ', '.join(val[1:])
-
- def __delitem__(self, key):
- del self._container[key.lower()]
-
- def __contains__(self, key):
- return key.lower() in self._container
-
- def __eq__(self, other):
- if not isinstance(other, Mapping) and not hasattr(other, 'keys'):
- return False
- if not isinstance(other, type(self)):
- other = type(self)(other)
- return (dict((k.lower(), v) for k, v in self.itermerged()) ==
- dict((k.lower(), v) for k, v in other.itermerged()))
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- if not PY3: # Python 2
- iterkeys = MutableMapping.iterkeys
- itervalues = MutableMapping.itervalues
-
- __marker = object()
-
- def __len__(self):
- return len(self._container)
-
- def __iter__(self):
- # Only provide the originally cased names
- for vals in self._container.values():
- yield vals[0]
-
- def pop(self, key, default=__marker):
- '''D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
- If key is not found, d is returned if given, otherwise KeyError is raised.
- '''
- # Using the MutableMapping function directly fails due to the private marker.
- # Using ordinary dict.pop would expose the internal structures.
- # So let's reinvent the wheel.
- try:
- value = self[key]
- except KeyError:
- if default is self.__marker:
- raise
- return default
- else:
- del self[key]
- return value
-
- def discard(self, key):
- try:
- del self[key]
- except KeyError:
- pass
-
- def add(self, key, val):
- """Adds a (name, value) pair, doesn't overwrite the value if it already
- exists.
-
- >>> headers = HTTPHeaderDict(foo='bar')
- >>> headers.add('Foo', 'baz')
- >>> headers['foo']
- 'bar, baz'
- """
- key_lower = key.lower()
- new_vals = [key, val]
- # Keep the common case aka no item present as fast as possible
- vals = self._container.setdefault(key_lower, new_vals)
- if new_vals is not vals:
- vals.append(val)
-
- def extend(self, *args, **kwargs):
- """Generic import function for any type of header-like object.
- Adapted version of MutableMapping.update in order to insert items
- with self.add instead of self.__setitem__
- """
- if len(args) > 1:
- raise TypeError("extend() takes at most 1 positional "
- "argument ({0} given)".format(len(args)))
- other = args[0] if len(args) >= 1 else ()
-
- if isinstance(other, HTTPHeaderDict):
- for key, val in other.iteritems():
- self.add(key, val)
- elif isinstance(other, Mapping):
- for key in other:
- self.add(key, other[key])
- elif hasattr(other, "keys"):
- for key in other.keys():
- self.add(key, other[key])
- else:
- for key, value in other:
- self.add(key, value)
-
- for key, value in kwargs.items():
- self.add(key, value)
-
- def getlist(self, key):
- """Returns a list of all the values for the named field. Returns an
- empty list if the key doesn't exist."""
- try:
- vals = self._container[key.lower()]
- except KeyError:
- return []
- else:
- return vals[1:]
-
- # Backwards compatibility for httplib
- getheaders = getlist
- getallmatchingheaders = getlist
- iget = getlist
-
- def __repr__(self):
- return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
-
- def _copy_from(self, other):
- for key in other:
- val = other.getlist(key)
- if isinstance(val, list):
- # Don't need to convert tuples
- val = list(val)
- self._container[key.lower()] = [key] + val
-
- def copy(self):
- clone = type(self)()
- clone._copy_from(self)
- return clone
-
- def iteritems(self):
- """Iterate over all header lines, including duplicate ones."""
- for key in self:
- vals = self._container[key.lower()]
- for val in vals[1:]:
- yield vals[0], val
-
- def itermerged(self):
- """Iterate over all headers, merging duplicate ones together."""
- for key in self:
- val = self._container[key.lower()]
- yield val[0], ', '.join(val[1:])
-
- def items(self):
- return list(self.iteritems())
-
- @classmethod
- def from_httplib(cls, message): # Python 2
- """Read headers from a Python 2 httplib message object."""
- # python2.7 does not expose a proper API for exporting multiheaders
- # efficiently. This function re-reads raw lines from the message
- # object and extracts the multiheaders properly.
- headers = []
-
- for line in message.headers:
- if line.startswith((' ', '\t')):
- key, value = headers[-1]
- headers[-1] = (key, value + '\r\n' + line.rstrip())
- continue
-
- key, value = line.split(':', 1)
- headers.append((key, value.strip()))
-
- return cls(headers)
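
The behaviour documented in the docstrings above can be exercised directly; a minimal
sketch (assuming the vendored package is importable as urllib3):

    from urllib3._collections import HTTPHeaderDict, RecentlyUsedContainer

    # Case-insensitive, multi-valued headers.
    headers = HTTPHeaderDict()
    headers.add("Set-Cookie", "foo=bar")
    headers.add("set-cookie", "baz=quxx")
    assert headers["SET-cookie"] == "foo=bar, baz=quxx"
    assert headers.getlist("Set-Cookie") == ["foo=bar", "baz=quxx"]

    # LRU container: keeps at most maxsize entries and disposes of evicted values.
    cache = RecentlyUsedContainer(maxsize=2, dispose_func=lambda v: print("evicted", v))
    cache["a"] = 1
    cache["b"] = 2
    cache["c"] = 3  # evicts "a" and calls dispose_func(1)
    assert sorted(cache.keys()) == ["b", "c"]
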
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/connection.py b/src/collectors/python.d.plugin/python_modules/urllib3/connection.py
deleted file mode 100644
index f757493c7..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/connection.py
+++ /dev/null
@@ -1,374 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import datetime
-import logging
-import os
-import sys
-import socket
-from socket import error as SocketError, timeout as SocketTimeout
-import warnings
-from .packages import six
-from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
-from .packages.six.moves.http_client import HTTPException # noqa: F401
-
-try: # Compiled with SSL?
- import ssl
- BaseSSLError = ssl.SSLError
-except (ImportError, AttributeError): # Platform-specific: No SSL.
- ssl = None
-
- class BaseSSLError(BaseException):
- pass
-
-
-try: # Python 3:
- # Not a no-op, we're adding this to the namespace so it can be imported.
- ConnectionError = ConnectionError
-except NameError: # Python 2:
- class ConnectionError(Exception):
- pass
-
-
-from .exceptions import (
- NewConnectionError,
- ConnectTimeoutError,
- SubjectAltNameWarning,
- SystemTimeWarning,
-)
-from .packages.ssl_match_hostname import match_hostname, CertificateError
-
-from .util.ssl_ import (
- resolve_cert_reqs,
- resolve_ssl_version,
- assert_fingerprint,
- create_urllib3_context,
- ssl_wrap_socket
-)
-
-
-from .util import connection
-
-from ._collections import HTTPHeaderDict
-
-log = logging.getLogger(__name__)
-
-port_by_scheme = {
- 'http': 80,
- 'https': 443,
-}
-
-# When updating RECENT_DATE, move it to
-# within two years of the current date, and no
-# earlier than 6 months ago.
-RECENT_DATE = datetime.date(2016, 1, 1)
-
-
-class DummyConnection(object):
- """Used to detect a failed ConnectionCls import."""
- pass
-
-
-class HTTPConnection(_HTTPConnection, object):
- """
- Based on httplib.HTTPConnection but provides an extra constructor
- backwards-compatibility layer between older and newer Pythons.
-
- Additional keyword parameters are used to configure attributes of the connection.
- Accepted parameters include:
-
- - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- - ``source_address``: Set the source address for the current connection.
-
- .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
-
- - ``socket_options``: Set specific options on the underlying socket. If not specified, then
- defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
- Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
-
- For example, if you wish to enable TCP Keep Alive in addition to the defaults,
- you might pass::
-
- HTTPConnection.default_socket_options + [
- (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
- ]
-
- Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
- """
-
- default_port = port_by_scheme['http']
-
- #: Disable Nagle's algorithm by default.
- #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
- default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
-
- #: Whether this connection verifies the host's certificate.
- is_verified = False
-
- def __init__(self, *args, **kw):
- if six.PY3: # Python 3
- kw.pop('strict', None)
-
- # Pre-set source_address in case we have an older Python like 2.6.
- self.source_address = kw.get('source_address')
-
- if sys.version_info < (2, 7): # Python 2.6
- # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
- # not newer versions. We can still use it when creating a
- # connection though, so we pop it *after* we have saved it as
- # self.source_address.
- kw.pop('source_address', None)
-
- #: The socket options provided by the user. If no options are
- #: provided, we use the default options.
- self.socket_options = kw.pop('socket_options', self.default_socket_options)
-
- # Superclass also sets self.source_address in Python 2.7+.
- _HTTPConnection.__init__(self, *args, **kw)
-
- def _new_conn(self):
- """ Establish a socket connection and set nodelay settings on it.
-
- :return: New socket connection.
- """
- extra_kw = {}
- if self.source_address:
- extra_kw['source_address'] = self.source_address
-
- if self.socket_options:
- extra_kw['socket_options'] = self.socket_options
-
- try:
- conn = connection.create_connection(
- (self.host, self.port), self.timeout, **extra_kw)
-
- except SocketTimeout as e:
- raise ConnectTimeoutError(
- self, "Connection to %s timed out. (connect timeout=%s)" %
- (self.host, self.timeout))
-
- except SocketError as e:
- raise NewConnectionError(
- self, "Failed to establish a new connection: %s" % e)
-
- return conn
-
- def _prepare_conn(self, conn):
- self.sock = conn
- # the _tunnel_host attribute was added in python 2.6.3 (via
- # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
- # not have it.
- if getattr(self, '_tunnel_host', None):
- # TODO: Fix tunnel so it doesn't depend on self.sock state.
- self._tunnel()
- # Mark this connection as not reusable
- self.auto_open = 0
-
- def connect(self):
- conn = self._new_conn()
- self._prepare_conn(conn)
-
- def request_chunked(self, method, url, body=None, headers=None):
- """
- Alternative to the common request method, which sends the
- body with chunked encoding and not as one block
- """
- headers = HTTPHeaderDict(headers if headers is not None else {})
- skip_accept_encoding = 'accept-encoding' in headers
- skip_host = 'host' in headers
- self.putrequest(
- method,
- url,
- skip_accept_encoding=skip_accept_encoding,
- skip_host=skip_host
- )
- for header, value in headers.items():
- self.putheader(header, value)
- if 'transfer-encoding' not in headers:
- self.putheader('Transfer-Encoding', 'chunked')
- self.endheaders()
-
- if body is not None:
- stringish_types = six.string_types + (six.binary_type,)
- if isinstance(body, stringish_types):
- body = (body,)
- for chunk in body:
- if not chunk:
- continue
- if not isinstance(chunk, six.binary_type):
- chunk = chunk.encode('utf8')
- len_str = hex(len(chunk))[2:]
- self.send(len_str.encode('utf-8'))
- self.send(b'\r\n')
- self.send(chunk)
- self.send(b'\r\n')
-
- # After the if clause, to always have a closed body
- self.send(b'0\r\n\r\n')
-
-
-class HTTPSConnection(HTTPConnection):
- default_port = port_by_scheme['https']
-
- ssl_version = None
-
- def __init__(self, host, port=None, key_file=None, cert_file=None,
- strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
- ssl_context=None, **kw):
-
- HTTPConnection.__init__(self, host, port, strict=strict,
- timeout=timeout, **kw)
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.ssl_context = ssl_context
-
- # Required property for Google AppEngine 1.9.0 which otherwise causes
- # HTTPS requests to go out as HTTP. (See Issue #356)
- self._protocol = 'https'
-
- def connect(self):
- conn = self._new_conn()
- self._prepare_conn(conn)
-
- if self.ssl_context is None:
- self.ssl_context = create_urllib3_context(
- ssl_version=resolve_ssl_version(None),
- cert_reqs=resolve_cert_reqs(None),
- )
-
- self.sock = ssl_wrap_socket(
- sock=conn,
- keyfile=self.key_file,
- certfile=self.cert_file,
- ssl_context=self.ssl_context,
- )
-
-
-class VerifiedHTTPSConnection(HTTPSConnection):
- """
- Based on httplib.HTTPSConnection but wraps the socket with
- SSL certification.
- """
- cert_reqs = None
- ca_certs = None
- ca_cert_dir = None
- ssl_version = None
- assert_fingerprint = None
-
- def set_cert(self, key_file=None, cert_file=None,
- cert_reqs=None, ca_certs=None,
- assert_hostname=None, assert_fingerprint=None,
- ca_cert_dir=None):
- """
- This method should only be called once, before the connection is used.
- """
- # If cert_reqs is not provided, we can try to guess. If the user gave
- # us a cert database, we assume they want to use it: otherwise, if
- # they gave us an SSL Context object we should use whatever is set for
- # it.
- if cert_reqs is None:
- if ca_certs or ca_cert_dir:
- cert_reqs = 'CERT_REQUIRED'
- elif self.ssl_context is not None:
- cert_reqs = self.ssl_context.verify_mode
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.cert_reqs = cert_reqs
- self.assert_hostname = assert_hostname
- self.assert_fingerprint = assert_fingerprint
- self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
- self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
-
- def connect(self):
- # Add certificate verification
- conn = self._new_conn()
-
- hostname = self.host
- if getattr(self, '_tunnel_host', None):
- # _tunnel_host was added in Python 2.6.3
- # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
-
- self.sock = conn
- # Calls self._set_hostport(), so self.host is
- # self._tunnel_host below.
- self._tunnel()
- # Mark this connection as not reusable
- self.auto_open = 0
-
- # Override the host with the one we're requesting data from.
- hostname = self._tunnel_host
-
- is_time_off = datetime.date.today() < RECENT_DATE
- if is_time_off:
- warnings.warn((
- 'System time is way off (before {0}). This will probably '
- 'lead to SSL verification errors').format(RECENT_DATE),
- SystemTimeWarning
- )
-
- # Wrap socket using verification with the root certs in
- # trusted_root_certs
- if self.ssl_context is None:
- self.ssl_context = create_urllib3_context(
- ssl_version=resolve_ssl_version(self.ssl_version),
- cert_reqs=resolve_cert_reqs(self.cert_reqs),
- )
-
- context = self.ssl_context
- context.verify_mode = resolve_cert_reqs(self.cert_reqs)
- self.sock = ssl_wrap_socket(
- sock=conn,
- keyfile=self.key_file,
- certfile=self.cert_file,
- ca_certs=self.ca_certs,
- ca_cert_dir=self.ca_cert_dir,
- server_hostname=hostname,
- ssl_context=context)
-
- if self.assert_fingerprint:
- assert_fingerprint(self.sock.getpeercert(binary_form=True),
- self.assert_fingerprint)
- elif context.verify_mode != ssl.CERT_NONE \
- and not getattr(context, 'check_hostname', False) \
- and self.assert_hostname is not False:
- # While urllib3 attempts to always turn off hostname matching from
- # the TLS library, this cannot always be done. So we check whether
- # the TLS Library still thinks it's matching hostnames.
- cert = self.sock.getpeercert()
- if not cert.get('subjectAltName', ()):
- warnings.warn((
- 'Certificate for {0} has no `subjectAltName`, falling back to check for a '
- '`commonName` for now. This feature is being removed by major browsers and '
- 'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
- 'for details.)'.format(hostname)),
- SubjectAltNameWarning
- )
- _match_hostname(cert, self.assert_hostname or hostname)
-
- self.is_verified = (
- context.verify_mode == ssl.CERT_REQUIRED or
- self.assert_fingerprint is not None
- )
-
-
-def _match_hostname(cert, asserted_hostname):
- try:
- match_hostname(cert, asserted_hostname)
- except CertificateError as e:
- log.error(
- 'Certificate did not match expected hostname: %s. '
- 'Certificate: %s', asserted_hostname, cert
- )
- # Add cert to exception and reraise so client code can inspect
- # the cert when catching the exception, if they want to
- e._peer_cert = cert
- raise
-
-
-if ssl:
- # Make a copy for testing.
- UnverifiedHTTPSConnection = HTTPSConnection
- HTTPSConnection = VerifiedHTTPSConnection
-else:
- HTTPSConnection = DummyConnection
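
The socket-option hook described in the HTTPConnection docstring above can be used as
sketched below (endpoint, port and keep-alive values are illustrative):

    import socket
    from urllib3.connection import HTTPConnection

    # Extend the defaults (TCP_NODELAY) with TCP keep-alive.
    keepalive_options = HTTPConnection.default_socket_options + [
        (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
    ]

    conn = HTTPConnection("127.0.0.1", 19999, timeout=2.0,
                          socket_options=keepalive_options)
    conn.request("GET", "/api/v1/info")
    print(conn.getresponse().status)
    conn.close()
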
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py b/src/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
deleted file mode 100644
index 90e4c86a5..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/connectionpool.py
+++ /dev/null
@@ -1,900 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import errno
-import logging
-import sys
-import warnings
-
-from socket import error as SocketError, timeout as SocketTimeout
-import socket
-
-
-from .exceptions import (
- ClosedPoolError,
- ProtocolError,
- EmptyPoolError,
- HeaderParsingError,
- HostChangedError,
- LocationValueError,
- MaxRetryError,
- ProxyError,
- ReadTimeoutError,
- SSLError,
- TimeoutError,
- InsecureRequestWarning,
- NewConnectionError,
-)
-from .packages.ssl_match_hostname import CertificateError
-from .packages import six
-from .packages.six.moves import queue
-from .connection import (
- port_by_scheme,
- DummyConnection,
- HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection,
- HTTPException, BaseSSLError,
-)
-from .request import RequestMethods
-from .response import HTTPResponse
-
-from .util.connection import is_connection_dropped
-from .util.request import set_file_position
-from .util.response import assert_header_parsing
-from .util.retry import Retry
-from .util.timeout import Timeout
-from .util.url import get_host, Url
-
-
-if six.PY2:
- # Queue is imported for side effects on MS Windows
- import Queue as _unused_module_Queue # noqa: F401
-
-xrange = six.moves.xrange
-
-log = logging.getLogger(__name__)
-
-_Default = object()
-
-
-# Pool objects
-class ConnectionPool(object):
- """
- Base class for all connection pools, such as
- :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
- """
-
- scheme = None
- QueueCls = queue.LifoQueue
-
- def __init__(self, host, port=None):
- if not host:
- raise LocationValueError("No host specified.")
-
- self.host = _ipv6_host(host).lower()
- self.port = port
-
- def __str__(self):
- return '%s(host=%r, port=%r)' % (type(self).__name__,
- self.host, self.port)
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.close()
- # Return False to re-raise any potential exceptions
- return False
-
- def close(self):
- """
- Close all pooled connections and disable the pool.
- """
- pass
-
-
-# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
-_blocking_errnos = set([errno.EAGAIN, errno.EWOULDBLOCK])
-
-
-class HTTPConnectionPool(ConnectionPool, RequestMethods):
- """
- Thread-safe connection pool for one host.
-
- :param host:
- Host used for this HTTP Connection (e.g. "localhost"), passed into
- :class:`httplib.HTTPConnection`.
-
- :param port:
- Port used for this HTTP Connection (None is equivalent to 80), passed
- into :class:`httplib.HTTPConnection`.
-
- :param strict:
- Causes BadStatusLine to be raised if the status line can't be parsed
- as a valid HTTP/1.0 or 1.1 status line, passed into
- :class:`httplib.HTTPConnection`.
-
- .. note::
- Only works in Python 2. This parameter is ignored in Python 3.
-
- :param timeout:
- Socket timeout in seconds for each individual connection. This can
- be a float or integer, which sets the timeout for the HTTP request,
- or an instance of :class:`urllib3.util.Timeout` which gives you more
- fine-grained control over request timeouts. After the constructor has
- been parsed, this is always a `urllib3.util.Timeout` object.
-
- :param maxsize:
- Number of connections to save that can be reused. More than 1 is useful
- in multithreaded situations. If ``block`` is set to False, more
- connections will be created but they will not be saved once they've
- been used.
-
- :param block:
- If set to True, no more than ``maxsize`` connections will be used at
- a time. When no free connections are available, the call will block
- until a connection has been released. This is a useful side effect for
- particular multithreaded situations where one does not want to use more
- than maxsize connections per host to prevent flooding.
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
-
- :param retries:
- Retry configuration to use by default with requests in this pool.
-
- :param _proxy:
- Parsed proxy URL, should not be used directly, instead, see
- :class:`urllib3.connectionpool.ProxyManager`
-
- :param _proxy_headers:
- A dictionary with proxy headers, should not be used directly,
- instead, see :class:`urllib3.connectionpool.ProxyManager`
-
- :param \\**conn_kw:
- Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
- :class:`urllib3.connection.HTTPSConnection` instances.
- """
-
- scheme = 'http'
- ConnectionCls = HTTPConnection
- ResponseCls = HTTPResponse
-
- def __init__(self, host, port=None, strict=False,
- timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False,
- headers=None, retries=None,
- _proxy=None, _proxy_headers=None,
- **conn_kw):
- ConnectionPool.__init__(self, host, port)
- RequestMethods.__init__(self, headers)
-
- self.strict = strict
-
- if not isinstance(timeout, Timeout):
- timeout = Timeout.from_float(timeout)
-
- if retries is None:
- retries = Retry.DEFAULT
-
- self.timeout = timeout
- self.retries = retries
-
- self.pool = self.QueueCls(maxsize)
- self.block = block
-
- self.proxy = _proxy
- self.proxy_headers = _proxy_headers or {}
-
- # Fill the queue up so that doing get() on it will block properly
- for _ in xrange(maxsize):
- self.pool.put(None)
-
- # These are mostly for testing and debugging purposes.
- self.num_connections = 0
- self.num_requests = 0
- self.conn_kw = conn_kw
-
- if self.proxy:
- # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
- # We cannot know if the user has added default socket options, so we cannot replace the
- # list.
- self.conn_kw.setdefault('socket_options', [])
-
- def _new_conn(self):
- """
- Return a fresh :class:`HTTPConnection`.
- """
- self.num_connections += 1
- log.debug("Starting new HTTP connection (%d): %s",
- self.num_connections, self.host)
-
- conn = self.ConnectionCls(host=self.host, port=self.port,
- timeout=self.timeout.connect_timeout,
- strict=self.strict, **self.conn_kw)
- return conn
-
- def _get_conn(self, timeout=None):
- """
- Get a connection. Will return a pooled connection if one is available.
-
- If no connections are available and :prop:`.block` is ``False``, then a
- fresh connection is returned.
-
- :param timeout:
- Seconds to wait before giving up and raising
- :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
- :prop:`.block` is ``True``.
- """
- conn = None
- try:
- conn = self.pool.get(block=self.block, timeout=timeout)
-
- except AttributeError: # self.pool is None
- raise ClosedPoolError(self, "Pool is closed.")
-
- except queue.Empty:
- if self.block:
- raise EmptyPoolError(self,
- "Pool reached maximum size and no more "
- "connections are allowed.")
- pass # Oh well, we'll create a new connection then
-
- # If this is a persistent connection, check if it got disconnected
- if conn and is_connection_dropped(conn):
- log.debug("Resetting dropped connection: %s", self.host)
- conn.close()
- if getattr(conn, 'auto_open', 1) == 0:
- # This is a proxied connection that has been mutated by
- # httplib._tunnel() and cannot be reused (since it would
- # attempt to bypass the proxy)
- conn = None
-
- return conn or self._new_conn()
-
- def _put_conn(self, conn):
- """
- Put a connection back into the pool.
-
- :param conn:
- Connection object for the current host and port as returned by
- :meth:`._new_conn` or :meth:`._get_conn`.
-
- If the pool is already full, the connection is closed and discarded
- because we exceeded maxsize. If connections are discarded frequently,
- then maxsize should be increased.
-
- If the pool is closed, then the connection will be closed and discarded.
- """
- try:
- self.pool.put(conn, block=False)
- return # Everything is dandy, done.
- except AttributeError:
- # self.pool is None.
- pass
- except queue.Full:
- # This should never happen if self.block == True
- log.warning(
- "Connection pool is full, discarding connection: %s",
- self.host)
-
- # Connection never got put back into the pool, close it.
- if conn:
- conn.close()
-
- def _validate_conn(self, conn):
- """
- Called right before a request is made, after the socket is created.
- """
- pass
-
- def _prepare_proxy(self, conn):
- # Nothing to do for HTTP connections.
- pass
-
- def _get_timeout(self, timeout):
- """ Helper that always returns a :class:`urllib3.util.Timeout` """
- if timeout is _Default:
- return self.timeout.clone()
-
- if isinstance(timeout, Timeout):
- return timeout.clone()
- else:
- # User passed us an int/float. This is for backwards compatibility,
- # can be removed later
- return Timeout.from_float(timeout)
-
- def _raise_timeout(self, err, url, timeout_value):
- """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
-
- if isinstance(err, SocketTimeout):
- raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
-
- # See the above comment about EAGAIN in Python 3. In Python 2 we have
- # to specifically catch it and throw the timeout error
- if hasattr(err, 'errno') and err.errno in _blocking_errnos:
- raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
-
- # Catch possible read timeouts thrown as SSL errors. If not the
- # case, rethrow the original. We need to do this because of:
- # http://bugs.python.org/issue10272
- if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
- raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
-
- def _make_request(self, conn, method, url, timeout=_Default, chunked=False,
- **httplib_request_kw):
- """
- Perform a request on a given urllib connection object taken from our
- pool.
-
- :param conn:
- a connection from one of our connection pools
-
- :param timeout:
- Socket timeout in seconds for the request. This can be a
- float or integer, which will set the same timeout value for
- the socket connect and the socket read, or an instance of
- :class:`urllib3.util.Timeout`, which gives you more fine-grained
- control over your timeouts.
- """
- self.num_requests += 1
-
- timeout_obj = self._get_timeout(timeout)
- timeout_obj.start_connect()
- conn.timeout = timeout_obj.connect_timeout
-
- # Trigger any extra validation we need to do.
- try:
- self._validate_conn(conn)
- except (SocketTimeout, BaseSSLError) as e:
- # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
- self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
- raise
-
- # conn.request() calls httplib.*.request, not the method in
- # urllib3.request. It also calls makefile (recv) on the socket.
- if chunked:
- conn.request_chunked(method, url, **httplib_request_kw)
- else:
- conn.request(method, url, **httplib_request_kw)
-
- # Reset the timeout for the recv() on the socket
- read_timeout = timeout_obj.read_timeout
-
- # App Engine doesn't have a sock attr
- if getattr(conn, 'sock', None):
- # In Python 3 socket.py will catch EAGAIN and return None when you
- # try and read into the file pointer created by http.client, which
- # instead raises a BadStatusLine exception. Instead of catching
- # the exception and assuming all BadStatusLine exceptions are read
- # timeouts, check for a zero timeout before making the request.
- if read_timeout == 0:
- raise ReadTimeoutError(
- self, url, "Read timed out. (read timeout=%s)" % read_timeout)
- if read_timeout is Timeout.DEFAULT_TIMEOUT:
- conn.sock.settimeout(socket.getdefaulttimeout())
- else: # None or a value
- conn.sock.settimeout(read_timeout)
-
- # Receive the response from the server
- try:
- try: # Python 2.7, use buffering of HTTP responses
- httplib_response = conn.getresponse(buffering=True)
- except TypeError: # Python 2.6 and older, Python 3
- try:
- httplib_response = conn.getresponse()
- except Exception as e:
- # Remove the TypeError from the exception chain in Python 3;
- # otherwise it looks like a programming error was the cause.
- six.raise_from(e, None)
- except (SocketTimeout, BaseSSLError, SocketError) as e:
- self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
- raise
-
- # AppEngine doesn't have a version attr.
- http_version = getattr(conn, '_http_vsn_str', 'HTTP/?')
- log.debug("%s://%s:%s \"%s %s %s\" %s %s", self.scheme, self.host, self.port,
- method, url, http_version, httplib_response.status,
- httplib_response.length)
-
- try:
- assert_header_parsing(httplib_response.msg)
- except HeaderParsingError as hpe: # Platform-specific: Python 3
- log.warning(
- 'Failed to parse headers (url=%s): %s',
- self._absolute_url(url), hpe, exc_info=True)
-
- return httplib_response
-
- def _absolute_url(self, path):
- return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
-
- def close(self):
- """
- Close all pooled connections and disable the pool.
- """
- # Disable access to the pool
- old_pool, self.pool = self.pool, None
-
- try:
- while True:
- conn = old_pool.get(block=False)
- if conn:
- conn.close()
-
- except queue.Empty:
- pass # Done.
-
- def is_same_host(self, url):
- """
- Check if the given ``url`` is a member of the same host as this
- connection pool.
- """
- if url.startswith('/'):
- return True
-
- # TODO: Add optional support for socket.gethostbyname checking.
- scheme, host, port = get_host(url)
-
- host = _ipv6_host(host).lower()
-
- # Use explicit default port for comparison when none is given
- if self.port and not port:
- port = port_by_scheme.get(scheme)
- elif not self.port and port == port_by_scheme.get(scheme):
- port = None
-
- return (scheme, host, port) == (self.scheme, self.host, self.port)
-
- def urlopen(self, method, url, body=None, headers=None, retries=None,
- redirect=True, assert_same_host=True, timeout=_Default,
- pool_timeout=None, release_conn=None, chunked=False,
- body_pos=None, **response_kw):
- """
- Get a connection from the pool and perform an HTTP request. This is the
- lowest level call for making a request, so you'll need to specify all
- the raw details.
-
- .. note::
-
- More commonly, it's appropriate to use a convenience method provided
- by :class:`.RequestMethods`, such as :meth:`request`.
-
- .. note::
-
- `release_conn` will only behave as expected if
- `preload_content=False` because we want to make
- `preload_content=False` the default behaviour someday soon without
- breaking backwards compatibility.
-
- :param method:
- HTTP request method (such as GET, POST, PUT, etc.)
-
- :param body:
- Data to send in the request body (useful for creating
- POST requests, see HTTPConnectionPool.post_url for
- more convenience).
-
- :param headers:
- Dictionary of custom headers to send, such as User-Agent,
- If-None-Match, etc. If None, pool headers are used. If provided,
- these headers completely replace any pool-specific headers.
-
- :param retries:
- Configure the number of retries to allow before raising a
- :class:`~urllib3.exceptions.MaxRetryError` exception.
-
- Pass ``None`` to retry until you receive a response. Pass a
- :class:`~urllib3.util.retry.Retry` object for fine-grained control
- over different types of retries.
- Pass an integer number to retry connection errors that many times,
- but no other types of errors. Pass zero to never retry.
-
- If ``False``, then retries are disabled and any exception is raised
- immediately. Also, instead of raising a MaxRetryError on redirects,
- the redirect response will be returned.
-
- :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
-
- :param redirect:
- If True, automatically handle redirects (status codes 301, 302,
- 303, 307, 308). Each redirect counts as a retry. Disabling retries
- will disable redirect, too.
-
- :param assert_same_host:
- If ``True``, will make sure that the host of the pool requests is
- consistent else will raise HostChangedError. When False, you can
- use the pool on an HTTP proxy and request foreign hosts.
-
- :param timeout:
- If specified, overrides the default timeout for this one
- request. It may be a float (in seconds) or an instance of
- :class:`urllib3.util.Timeout`.
-
- :param pool_timeout:
- If set and the pool is set to block=True, then this method will
- block for ``pool_timeout`` seconds and raise EmptyPoolError if no
- connection is available within the time period.
-
- :param release_conn:
- If False, then the urlopen call will not release the connection
- back into the pool once a response is received (but will release if
- you read the entire contents of the response such as when
- `preload_content=True`). This is useful if you're not preloading
- the response's content immediately. You will need to call
- ``r.release_conn()`` on the response ``r`` to return the connection
- back into the pool. If None, it takes the value of
- ``response_kw.get('preload_content', True)``.
-
- :param chunked:
- If True, urllib3 will send the body using chunked transfer
- encoding. Otherwise, urllib3 will send the body using the standard
- content-length form. Defaults to False.
-
- :param int body_pos:
- Position to seek to in file-like body in the event of a retry or
- redirect. Typically this won't need to be set because urllib3 will
- auto-populate the value when needed.
-
- :param \\**response_kw:
- Additional parameters are passed to
- :meth:`urllib3.response.HTTPResponse.from_httplib`
- """
- if headers is None:
- headers = self.headers
-
- if not isinstance(retries, Retry):
- retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
-
- if release_conn is None:
- release_conn = response_kw.get('preload_content', True)
-
- # Check host
- if assert_same_host and not self.is_same_host(url):
- raise HostChangedError(self, url, retries)
-
- conn = None
-
- # Track whether `conn` needs to be released before
- # returning/raising/recursing. Update this variable if necessary, and
- # leave `release_conn` constant throughout the function. That way, if
- # the function recurses, the original value of `release_conn` will be
- # passed down into the recursive call, and its value will be respected.
- #
- # See issue #651 [1] for details.
- #
- # [1] <https://github.com/shazow/urllib3/issues/651>
- release_this_conn = release_conn
-
- # Merge the proxy headers. Only do this in HTTP. We have to copy the
- # headers dict so we can safely change it without those changes being
- # reflected in anyone else's copy.
- if self.scheme == 'http':
- headers = headers.copy()
- headers.update(self.proxy_headers)
-
- # Must keep the exception bound to a separate variable or else Python 3
- # complains about UnboundLocalError.
- err = None
-
- # Keep track of whether we cleanly exited the except block. This
- # ensures we do proper cleanup in finally.
- clean_exit = False
-
- # Rewind body position, if needed. Record current position
- # for future rewinds in the event of a redirect/retry.
- body_pos = set_file_position(body, body_pos)
-
- try:
- # Request a connection from the queue.
- timeout_obj = self._get_timeout(timeout)
- conn = self._get_conn(timeout=pool_timeout)
-
- conn.timeout = timeout_obj.connect_timeout
-
- is_new_proxy_conn = self.proxy is not None and not getattr(conn, 'sock', None)
- if is_new_proxy_conn:
- self._prepare_proxy(conn)
-
- # Make the request on the httplib connection object.
- httplib_response = self._make_request(conn, method, url,
- timeout=timeout_obj,
- body=body, headers=headers,
- chunked=chunked)
-
- # If we're going to release the connection in ``finally:``, then
- # the response doesn't need to know about the connection. Otherwise
- # it will also try to release it and we'll have a double-release
- # mess.
- response_conn = conn if not release_conn else None
-
- # Pass method to Response for length checking
- response_kw['request_method'] = method
-
- # Import httplib's response into our own wrapper object
- response = self.ResponseCls.from_httplib(httplib_response,
- pool=self,
- connection=response_conn,
- retries=retries,
- **response_kw)
-
- # Everything went great!
- clean_exit = True
-
- except queue.Empty:
- # Timed out by queue.
- raise EmptyPoolError(self, "No pool connections are available.")
-
- except (BaseSSLError, CertificateError) as e:
- # Close the connection. If a connection is reused on which there
- # was a Certificate error, the next request will certainly raise
- # another Certificate error.
- clean_exit = False
- raise SSLError(e)
-
- except SSLError:
- # Treat SSLError separately from BaseSSLError to preserve
- # traceback.
- clean_exit = False
- raise
-
- except (TimeoutError, HTTPException, SocketError, ProtocolError) as e:
- # Discard the connection for these exceptions. It will be
- # replaced during the next _get_conn() call.
- clean_exit = False
-
- if isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
- e = ProxyError('Cannot connect to proxy.', e)
- elif isinstance(e, (SocketError, HTTPException)):
- e = ProtocolError('Connection aborted.', e)
-
- retries = retries.increment(method, url, error=e, _pool=self,
- _stacktrace=sys.exc_info()[2])
- retries.sleep()
-
- # Keep track of the error for the retry warning.
- err = e
-
- finally:
- if not clean_exit:
- # We hit some kind of exception, handled or otherwise. We need
- # to throw the connection away unless explicitly told not to.
- # Close the connection, set the variable to None, and make sure
- # we put the None back in the pool to avoid leaking it.
- conn = conn and conn.close()
- release_this_conn = True
-
- if release_this_conn:
- # Put the connection back to be reused. If the connection is
- # expired then it will be None, which will get replaced with a
- # fresh connection during _get_conn.
- self._put_conn(conn)
-
- if not conn:
- # Try again
- log.warning("Retrying (%r) after connection "
- "broken by '%r': %s", retries, err, url)
- return self.urlopen(method, url, body, headers, retries,
- redirect, assert_same_host,
- timeout=timeout, pool_timeout=pool_timeout,
- release_conn=release_conn, body_pos=body_pos,
- **response_kw)
-
- # Handle redirect?
- redirect_location = redirect and response.get_redirect_location()
- if redirect_location:
- if response.status == 303:
- method = 'GET'
-
- try:
- retries = retries.increment(method, url, response=response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_redirect:
- # Release the connection for this response, since we're not
- # returning it to be released manually.
- response.release_conn()
- raise
- return response
-
- retries.sleep_for_retry(response)
- log.debug("Redirecting %s -> %s", url, redirect_location)
- return self.urlopen(
- method, redirect_location, body, headers,
- retries=retries, redirect=redirect,
- assert_same_host=assert_same_host,
- timeout=timeout, pool_timeout=pool_timeout,
- release_conn=release_conn, body_pos=body_pos,
- **response_kw)
-
- # Check if we should retry the HTTP response.
- has_retry_after = bool(response.getheader('Retry-After'))
- if retries.is_retry(method, response.status, has_retry_after):
- try:
- retries = retries.increment(method, url, response=response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_status:
- # Release the connection for this response, since we're not
- # returning it to be released manually.
- response.release_conn()
- raise
- return response
- retries.sleep(response)
- log.debug("Retry: %s", url)
- return self.urlopen(
- method, url, body, headers,
- retries=retries, redirect=redirect,
- assert_same_host=assert_same_host,
- timeout=timeout, pool_timeout=pool_timeout,
- release_conn=release_conn,
- body_pos=body_pos, **response_kw)
-
- return response
-
-
-class HTTPSConnectionPool(HTTPConnectionPool):
- """
- Same as :class:`.HTTPConnectionPool`, but HTTPS.
-
- When Python is compiled with the :mod:`ssl` module, then
- :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
- instead of :class:`.HTTPSConnection`.
-
- :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
- ``assert_hostname`` and ``host`` in this order to verify connections.
- If ``assert_hostname`` is False, no verification is done.
-
- The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
- ``ca_cert_dir``, and ``ssl_version`` are only used if :mod:`ssl` is
- available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
- the connection socket into an SSL socket.
- """
-
- scheme = 'https'
- ConnectionCls = HTTPSConnection
-
- def __init__(self, host, port=None,
- strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1,
- block=False, headers=None, retries=None,
- _proxy=None, _proxy_headers=None,
- key_file=None, cert_file=None, cert_reqs=None,
- ca_certs=None, ssl_version=None,
- assert_hostname=None, assert_fingerprint=None,
- ca_cert_dir=None, **conn_kw):
-
- HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize,
- block, headers, retries, _proxy, _proxy_headers,
- **conn_kw)
-
- if ca_certs and cert_reqs is None:
- cert_reqs = 'CERT_REQUIRED'
-
- self.key_file = key_file
- self.cert_file = cert_file
- self.cert_reqs = cert_reqs
- self.ca_certs = ca_certs
- self.ca_cert_dir = ca_cert_dir
- self.ssl_version = ssl_version
- self.assert_hostname = assert_hostname
- self.assert_fingerprint = assert_fingerprint
-
- def _prepare_conn(self, conn):
- """
- Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
- and establish the tunnel if proxy is used.
- """
-
- if isinstance(conn, VerifiedHTTPSConnection):
- conn.set_cert(key_file=self.key_file,
- cert_file=self.cert_file,
- cert_reqs=self.cert_reqs,
- ca_certs=self.ca_certs,
- ca_cert_dir=self.ca_cert_dir,
- assert_hostname=self.assert_hostname,
- assert_fingerprint=self.assert_fingerprint)
- conn.ssl_version = self.ssl_version
- return conn
-
- def _prepare_proxy(self, conn):
- """
- Establish tunnel connection early, because otherwise httplib
- would improperly set Host: header to proxy's IP:port.
- """
- # Python 2.7+
- try:
- set_tunnel = conn.set_tunnel
- except AttributeError: # Platform-specific: Python 2.6
- set_tunnel = conn._set_tunnel
-
- if sys.version_info <= (2, 6, 4) and not self.proxy_headers: # Python 2.6.4 and older
- set_tunnel(self.host, self.port)
- else:
- set_tunnel(self.host, self.port, self.proxy_headers)
-
- conn.connect()
-
- def _new_conn(self):
- """
- Return a fresh :class:`httplib.HTTPSConnection`.
- """
- self.num_connections += 1
- log.debug("Starting new HTTPS connection (%d): %s",
- self.num_connections, self.host)
-
- if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
- raise SSLError("Can't connect to HTTPS URL because the SSL "
- "module is not available.")
-
- actual_host = self.host
- actual_port = self.port
- if self.proxy is not None:
- actual_host = self.proxy.host
- actual_port = self.proxy.port
-
- conn = self.ConnectionCls(host=actual_host, port=actual_port,
- timeout=self.timeout.connect_timeout,
- strict=self.strict, **self.conn_kw)
-
- return self._prepare_conn(conn)
-
- def _validate_conn(self, conn):
- """
- Called right before a request is made, after the socket is created.
- """
- super(HTTPSConnectionPool, self)._validate_conn(conn)
-
- # Force connect early to allow us to validate the connection.
- if not getattr(conn, 'sock', None): # AppEngine might not have `.sock`
- conn.connect()
-
- if not conn.is_verified:
- warnings.warn((
- 'Unverified HTTPS request is being made. '
- 'Adding certificate verification is strongly advised. See: '
- 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
- '#ssl-warnings'),
- InsecureRequestWarning)
-
-
-def connection_from_url(url, **kw):
- """
- Given a url, return an :class:`.ConnectionPool` instance of its host.
-
- This is a shortcut for not having to parse out the scheme, host, and port
- of the url before creating an :class:`.ConnectionPool` instance.
-
- :param url:
- Absolute URL string that must include the scheme. Port is optional.
-
- :param \\**kw:
- Passes additional parameters to the constructor of the appropriate
- :class:`.ConnectionPool`. Useful for specifying things like
- timeout, maxsize, headers, etc.
-
- Example::
-
- >>> conn = connection_from_url('http://google.com/')
- >>> r = conn.request('GET', '/')
- """
- scheme, host, port = get_host(url)
- port = port or port_by_scheme.get(scheme, 80)
- if scheme == 'https':
- return HTTPSConnectionPool(host, port=port, **kw)
- else:
- return HTTPConnectionPool(host, port=port, **kw)
-
-
-def _ipv6_host(host):
- """
- Process IPv6 address literals
- """
-
- # httplib doesn't like it when we include brackets in IPv6 addresses
- # Specifically, if we include brackets but also pass the port then
- # httplib crazily doubles up the square brackets on the Host header.
- # Instead, we need to make sure we never pass ``None`` as the port.
- # However, for backward compatibility reasons we can't actually
- # *assert* that. See http://bugs.python.org/issue28539
- #
-    # Also, if an IPv6 address literal has a zone identifier, the
-    # percent sign might be URI-encoded; convert it back into ASCII.
- if host.startswith('[') and host.endswith(']'):
- host = host.replace('%25', '%').strip('[]')
- return host
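
A minimal usage sketch of the connection_from_url() shortcut shown above, assuming the
standalone urllib3 package is installed in place of this removed vendored copy;
'http://example.com/' is only a placeholder host:

    from urllib3 import connection_from_url

    # Build the right pool class (HTTP or HTTPS) from the URL's scheme, host and port.
    pool = connection_from_url('http://example.com/', maxsize=2, timeout=5.0)

    # request() issues the call through the pool and returns an HTTPResponse.
    response = pool.request('GET', '/')
    print(response.status, len(response.data))
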
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/__init__.py
+++ /dev/null
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/__init__.py
+++ /dev/null
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
deleted file mode 100644
index bb826673f..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/bindings.py
+++ /dev/null
@@ -1,591 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-This module uses ctypes to bind a whole bunch of functions and constants from
-SecureTransport. The goal here is to provide the low-level API to
-SecureTransport. These are essentially the C-level functions and constants, and
-they're pretty gross to work with.
-
-This code is a bastardised version of the code found in Will Bond's oscrypto
-library. An enormous debt is owed to him for blazing this trail for us. For
-that reason, this code should be considered to be covered both by urllib3's
-license and by oscrypto's:
-
- Copyright (c) 2015-2016 Will Bond <will@wbond.net>
-
- Permission is hereby granted, free of charge, to any person obtaining a
- copy of this software and associated documentation files (the "Software"),
- to deal in the Software without restriction, including without limitation
- the rights to use, copy, modify, merge, publish, distribute, sublicense,
- and/or sell copies of the Software, and to permit persons to whom the
- Software is furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- DEALINGS IN THE SOFTWARE.
-"""
-from __future__ import absolute_import
-
-import platform
-from ctypes.util import find_library
-from ctypes import (
- c_void_p, c_int32, c_char_p, c_size_t, c_byte, c_uint32, c_ulong, c_long,
- c_bool
-)
-from ctypes import CDLL, POINTER, CFUNCTYPE
-
-
-security_path = find_library('Security')
-if not security_path:
- raise ImportError('The library Security could not be found')
-
-
-core_foundation_path = find_library('CoreFoundation')
-if not core_foundation_path:
- raise ImportError('The library CoreFoundation could not be found')
-
-
-version = platform.mac_ver()[0]
-version_info = tuple(map(int, version.split('.')))
-if version_info < (10, 8):
- raise OSError(
- 'Only OS X 10.8 and newer are supported, not %s.%s' % (
- version_info[0], version_info[1]
- )
- )
-
-Security = CDLL(security_path, use_errno=True)
-CoreFoundation = CDLL(core_foundation_path, use_errno=True)
-
-Boolean = c_bool
-CFIndex = c_long
-CFStringEncoding = c_uint32
-CFData = c_void_p
-CFString = c_void_p
-CFArray = c_void_p
-CFMutableArray = c_void_p
-CFDictionary = c_void_p
-CFError = c_void_p
-CFType = c_void_p
-CFTypeID = c_ulong
-
-CFTypeRef = POINTER(CFType)
-CFAllocatorRef = c_void_p
-
-OSStatus = c_int32
-
-CFDataRef = POINTER(CFData)
-CFStringRef = POINTER(CFString)
-CFArrayRef = POINTER(CFArray)
-CFMutableArrayRef = POINTER(CFMutableArray)
-CFDictionaryRef = POINTER(CFDictionary)
-CFArrayCallBacks = c_void_p
-CFDictionaryKeyCallBacks = c_void_p
-CFDictionaryValueCallBacks = c_void_p
-
-SecCertificateRef = POINTER(c_void_p)
-SecExternalFormat = c_uint32
-SecExternalItemType = c_uint32
-SecIdentityRef = POINTER(c_void_p)
-SecItemImportExportFlags = c_uint32
-SecItemImportExportKeyParameters = c_void_p
-SecKeychainRef = POINTER(c_void_p)
-SSLProtocol = c_uint32
-SSLCipherSuite = c_uint32
-SSLContextRef = POINTER(c_void_p)
-SecTrustRef = POINTER(c_void_p)
-SSLConnectionRef = c_uint32
-SecTrustResultType = c_uint32
-SecTrustOptionFlags = c_uint32
-SSLProtocolSide = c_uint32
-SSLConnectionType = c_uint32
-SSLSessionOption = c_uint32
-
-
-try:
- Security.SecItemImport.argtypes = [
- CFDataRef,
- CFStringRef,
- POINTER(SecExternalFormat),
- POINTER(SecExternalItemType),
- SecItemImportExportFlags,
- POINTER(SecItemImportExportKeyParameters),
- SecKeychainRef,
- POINTER(CFArrayRef),
- ]
- Security.SecItemImport.restype = OSStatus
-
- Security.SecCertificateGetTypeID.argtypes = []
- Security.SecCertificateGetTypeID.restype = CFTypeID
-
- Security.SecIdentityGetTypeID.argtypes = []
- Security.SecIdentityGetTypeID.restype = CFTypeID
-
- Security.SecKeyGetTypeID.argtypes = []
- Security.SecKeyGetTypeID.restype = CFTypeID
-
- Security.SecCertificateCreateWithData.argtypes = [
- CFAllocatorRef,
- CFDataRef
- ]
- Security.SecCertificateCreateWithData.restype = SecCertificateRef
-
- Security.SecCertificateCopyData.argtypes = [
- SecCertificateRef
- ]
- Security.SecCertificateCopyData.restype = CFDataRef
-
- Security.SecCopyErrorMessageString.argtypes = [
- OSStatus,
- c_void_p
- ]
- Security.SecCopyErrorMessageString.restype = CFStringRef
-
- Security.SecIdentityCreateWithCertificate.argtypes = [
- CFTypeRef,
- SecCertificateRef,
- POINTER(SecIdentityRef)
- ]
- Security.SecIdentityCreateWithCertificate.restype = OSStatus
-
- Security.SecKeychainCreate.argtypes = [
- c_char_p,
- c_uint32,
- c_void_p,
- Boolean,
- c_void_p,
- POINTER(SecKeychainRef)
- ]
- Security.SecKeychainCreate.restype = OSStatus
-
- Security.SecKeychainDelete.argtypes = [
- SecKeychainRef
- ]
- Security.SecKeychainDelete.restype = OSStatus
-
- Security.SecPKCS12Import.argtypes = [
- CFDataRef,
- CFDictionaryRef,
- POINTER(CFArrayRef)
- ]
- Security.SecPKCS12Import.restype = OSStatus
-
- SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
- SSLWriteFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t))
-
- Security.SSLSetIOFuncs.argtypes = [
- SSLContextRef,
- SSLReadFunc,
- SSLWriteFunc
- ]
- Security.SSLSetIOFuncs.restype = OSStatus
-
- Security.SSLSetPeerID.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t
- ]
- Security.SSLSetPeerID.restype = OSStatus
-
- Security.SSLSetCertificate.argtypes = [
- SSLContextRef,
- CFArrayRef
- ]
- Security.SSLSetCertificate.restype = OSStatus
-
- Security.SSLSetCertificateAuthorities.argtypes = [
- SSLContextRef,
- CFTypeRef,
- Boolean
- ]
- Security.SSLSetCertificateAuthorities.restype = OSStatus
-
- Security.SSLSetConnection.argtypes = [
- SSLContextRef,
- SSLConnectionRef
- ]
- Security.SSLSetConnection.restype = OSStatus
-
- Security.SSLSetPeerDomainName.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t
- ]
- Security.SSLSetPeerDomainName.restype = OSStatus
-
- Security.SSLHandshake.argtypes = [
- SSLContextRef
- ]
- Security.SSLHandshake.restype = OSStatus
-
- Security.SSLRead.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t,
- POINTER(c_size_t)
- ]
- Security.SSLRead.restype = OSStatus
-
- Security.SSLWrite.argtypes = [
- SSLContextRef,
- c_char_p,
- c_size_t,
- POINTER(c_size_t)
- ]
- Security.SSLWrite.restype = OSStatus
-
- Security.SSLClose.argtypes = [
- SSLContextRef
- ]
- Security.SSLClose.restype = OSStatus
-
- Security.SSLGetNumberSupportedCiphers.argtypes = [
- SSLContextRef,
- POINTER(c_size_t)
- ]
- Security.SSLGetNumberSupportedCiphers.restype = OSStatus
-
- Security.SSLGetSupportedCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- POINTER(c_size_t)
- ]
- Security.SSLGetSupportedCiphers.restype = OSStatus
-
- Security.SSLSetEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- c_size_t
- ]
- Security.SSLSetEnabledCiphers.restype = OSStatus
-
-    Security.SSLGetNumberEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(c_size_t)
- ]
- Security.SSLGetNumberEnabledCiphers.restype = OSStatus
-
- Security.SSLGetEnabledCiphers.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite),
- POINTER(c_size_t)
- ]
- Security.SSLGetEnabledCiphers.restype = OSStatus
-
- Security.SSLGetNegotiatedCipher.argtypes = [
- SSLContextRef,
- POINTER(SSLCipherSuite)
- ]
- Security.SSLGetNegotiatedCipher.restype = OSStatus
-
- Security.SSLGetNegotiatedProtocolVersion.argtypes = [
- SSLContextRef,
- POINTER(SSLProtocol)
- ]
- Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
-
- Security.SSLCopyPeerTrust.argtypes = [
- SSLContextRef,
- POINTER(SecTrustRef)
- ]
- Security.SSLCopyPeerTrust.restype = OSStatus
-
- Security.SecTrustSetAnchorCertificates.argtypes = [
- SecTrustRef,
- CFArrayRef
- ]
- Security.SecTrustSetAnchorCertificates.restype = OSStatus
-
-    Security.SecTrustSetAnchorCertificatesOnly.argtypes = [
- SecTrustRef,
- Boolean
- ]
- Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
-
- Security.SecTrustEvaluate.argtypes = [
- SecTrustRef,
- POINTER(SecTrustResultType)
- ]
- Security.SecTrustEvaluate.restype = OSStatus
-
- Security.SecTrustGetCertificateCount.argtypes = [
- SecTrustRef
- ]
- Security.SecTrustGetCertificateCount.restype = CFIndex
-
- Security.SecTrustGetCertificateAtIndex.argtypes = [
- SecTrustRef,
- CFIndex
- ]
- Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
-
- Security.SSLCreateContext.argtypes = [
- CFAllocatorRef,
- SSLProtocolSide,
- SSLConnectionType
- ]
- Security.SSLCreateContext.restype = SSLContextRef
-
- Security.SSLSetSessionOption.argtypes = [
- SSLContextRef,
- SSLSessionOption,
- Boolean
- ]
- Security.SSLSetSessionOption.restype = OSStatus
-
- Security.SSLSetProtocolVersionMin.argtypes = [
- SSLContextRef,
- SSLProtocol
- ]
- Security.SSLSetProtocolVersionMin.restype = OSStatus
-
- Security.SSLSetProtocolVersionMax.argtypes = [
- SSLContextRef,
- SSLProtocol
- ]
- Security.SSLSetProtocolVersionMax.restype = OSStatus
-
- Security.SecCopyErrorMessageString.argtypes = [
- OSStatus,
- c_void_p
- ]
- Security.SecCopyErrorMessageString.restype = CFStringRef
-
- Security.SSLReadFunc = SSLReadFunc
- Security.SSLWriteFunc = SSLWriteFunc
- Security.SSLContextRef = SSLContextRef
- Security.SSLProtocol = SSLProtocol
- Security.SSLCipherSuite = SSLCipherSuite
- Security.SecIdentityRef = SecIdentityRef
- Security.SecKeychainRef = SecKeychainRef
- Security.SecTrustRef = SecTrustRef
- Security.SecTrustResultType = SecTrustResultType
- Security.SecExternalFormat = SecExternalFormat
- Security.OSStatus = OSStatus
-
- Security.kSecImportExportPassphrase = CFStringRef.in_dll(
- Security, 'kSecImportExportPassphrase'
- )
- Security.kSecImportItemIdentity = CFStringRef.in_dll(
- Security, 'kSecImportItemIdentity'
- )
-
- # CoreFoundation time!
- CoreFoundation.CFRetain.argtypes = [
- CFTypeRef
- ]
- CoreFoundation.CFRetain.restype = CFTypeRef
-
- CoreFoundation.CFRelease.argtypes = [
- CFTypeRef
- ]
- CoreFoundation.CFRelease.restype = None
-
- CoreFoundation.CFGetTypeID.argtypes = [
- CFTypeRef
- ]
- CoreFoundation.CFGetTypeID.restype = CFTypeID
-
- CoreFoundation.CFStringCreateWithCString.argtypes = [
- CFAllocatorRef,
- c_char_p,
- CFStringEncoding
- ]
- CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
-
- CoreFoundation.CFStringGetCStringPtr.argtypes = [
- CFStringRef,
- CFStringEncoding
- ]
- CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
-
- CoreFoundation.CFStringGetCString.argtypes = [
- CFStringRef,
- c_char_p,
- CFIndex,
- CFStringEncoding
- ]
- CoreFoundation.CFStringGetCString.restype = c_bool
-
- CoreFoundation.CFDataCreate.argtypes = [
- CFAllocatorRef,
- c_char_p,
- CFIndex
- ]
- CoreFoundation.CFDataCreate.restype = CFDataRef
-
- CoreFoundation.CFDataGetLength.argtypes = [
- CFDataRef
- ]
- CoreFoundation.CFDataGetLength.restype = CFIndex
-
- CoreFoundation.CFDataGetBytePtr.argtypes = [
- CFDataRef
- ]
- CoreFoundation.CFDataGetBytePtr.restype = c_void_p
-
- CoreFoundation.CFDictionaryCreate.argtypes = [
- CFAllocatorRef,
- POINTER(CFTypeRef),
- POINTER(CFTypeRef),
- CFIndex,
- CFDictionaryKeyCallBacks,
- CFDictionaryValueCallBacks
- ]
- CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
-
- CoreFoundation.CFDictionaryGetValue.argtypes = [
- CFDictionaryRef,
- CFTypeRef
- ]
- CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
-
- CoreFoundation.CFArrayCreate.argtypes = [
- CFAllocatorRef,
- POINTER(CFTypeRef),
- CFIndex,
- CFArrayCallBacks,
- ]
- CoreFoundation.CFArrayCreate.restype = CFArrayRef
-
- CoreFoundation.CFArrayCreateMutable.argtypes = [
- CFAllocatorRef,
- CFIndex,
- CFArrayCallBacks
- ]
- CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
-
- CoreFoundation.CFArrayAppendValue.argtypes = [
- CFMutableArrayRef,
- c_void_p
- ]
- CoreFoundation.CFArrayAppendValue.restype = None
-
- CoreFoundation.CFArrayGetCount.argtypes = [
- CFArrayRef
- ]
- CoreFoundation.CFArrayGetCount.restype = CFIndex
-
- CoreFoundation.CFArrayGetValueAtIndex.argtypes = [
- CFArrayRef,
- CFIndex
- ]
- CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
-
- CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
- CoreFoundation, 'kCFAllocatorDefault'
- )
- CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(CoreFoundation, 'kCFTypeArrayCallBacks')
- CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
- CoreFoundation, 'kCFTypeDictionaryKeyCallBacks'
- )
- CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
- CoreFoundation, 'kCFTypeDictionaryValueCallBacks'
- )
-
- CoreFoundation.CFTypeRef = CFTypeRef
- CoreFoundation.CFArrayRef = CFArrayRef
- CoreFoundation.CFStringRef = CFStringRef
- CoreFoundation.CFDictionaryRef = CFDictionaryRef
-
-except AttributeError:
- raise ImportError('Error initializing ctypes')
-
-
-class CFConst(object):
- """
- A class object that acts as essentially a namespace for CoreFoundation
- constants.
- """
- kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
-
-
-class SecurityConst(object):
- """
- A class object that acts as essentially a namespace for Security constants.
- """
- kSSLSessionOptionBreakOnServerAuth = 0
-
- kSSLProtocol2 = 1
- kSSLProtocol3 = 2
- kTLSProtocol1 = 4
- kTLSProtocol11 = 7
- kTLSProtocol12 = 8
-
- kSSLClientSide = 1
- kSSLStreamType = 0
-
- kSecFormatPEMSequence = 10
-
- kSecTrustResultInvalid = 0
- kSecTrustResultProceed = 1
- # This gap is present on purpose: this was kSecTrustResultConfirm, which
- # is deprecated.
- kSecTrustResultDeny = 3
- kSecTrustResultUnspecified = 4
- kSecTrustResultRecoverableTrustFailure = 5
- kSecTrustResultFatalTrustFailure = 6
- kSecTrustResultOtherError = 7
-
- errSSLProtocol = -9800
- errSSLWouldBlock = -9803
- errSSLClosedGraceful = -9805
- errSSLClosedNoNotify = -9816
- errSSLClosedAbort = -9806
-
- errSSLXCertChainInvalid = -9807
- errSSLCrypto = -9809
- errSSLInternal = -9810
- errSSLCertExpired = -9814
- errSSLCertNotYetValid = -9815
- errSSLUnknownRootCert = -9812
- errSSLNoRootCert = -9813
- errSSLHostNameMismatch = -9843
- errSSLPeerHandshakeFail = -9824
- errSSLPeerUserCancelled = -9839
- errSSLWeakPeerEphemeralDHKey = -9850
- errSSLServerAuthCompleted = -9841
- errSSLRecordOverflow = -9847
-
- errSecVerifyFailed = -67808
- errSecNoTrustSettings = -25263
- errSecItemNotFound = -25300
- errSecInvalidTrustSettings = -25262
-
- # Cipher suites. We only pick the ones our default cipher string allows.
- TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
- TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
- TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
- TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
- TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = 0x00A3
- TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
- TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = 0x00A2
- TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
- TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
- TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
- TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = 0x006A
- TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
- TLS_DHE_DSS_WITH_AES_256_CBC_SHA = 0x0038
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
- TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
- TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
- TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = 0x0040
- TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
- TLS_DHE_DSS_WITH_AES_128_CBC_SHA = 0x0032
- TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
- TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
- TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
- TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
- TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
- TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
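
The bindings above all follow one ctypes pattern: load the shared library, then declare
each function's argument types and return type before calling it. A minimal sketch of
that pattern, using libc's strlen() as a portable stand-in for the macOS-only
Security.framework calls (note that find_library may return None on unusual platforms):

    import ctypes
    import ctypes.util

    # Locate and load the C library, mirroring how Security/CoreFoundation are loaded above.
    libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)

    # Declare the prototype so ctypes converts arguments and the return value correctly.
    libc.strlen.argtypes = [ctypes.c_char_p]
    libc.strlen.restype = ctypes.c_size_t

    print(libc.strlen(b'SecureTransport'))  # -> 15
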
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
deleted file mode 100644
index 0f79a1372..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/_securetransport/low_level.py
+++ /dev/null
@@ -1,344 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-Low-level helpers for the SecureTransport bindings.
-
-These are Python functions that are not directly related to the high-level APIs
-but are necessary to get them to work. They include a whole bunch of low-level
-CoreFoundation messing about and memory management. The concerns in this module
-are almost entirely about trying to avoid memory leaks and providing
-appropriate and useful assistance to the higher-level code.
-"""
-import base64
-import ctypes
-import itertools
-import re
-import os
-import ssl
-import tempfile
-
-from .bindings import Security, CoreFoundation, CFConst
-
-
-# This regular expression is used to grab PEM data out of a PEM bundle.
-_PEM_CERTS_RE = re.compile(
- b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
-)
-
-
-def _cf_data_from_bytes(bytestring):
- """
- Given a bytestring, create a CFData object from it. This CFData object must
- be CFReleased by the caller.
- """
- return CoreFoundation.CFDataCreate(
- CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
- )
-
-
-def _cf_dictionary_from_tuples(tuples):
- """
- Given a list of Python tuples, create an associated CFDictionary.
- """
- dictionary_size = len(tuples)
-
- # We need to get the dictionary keys and values out in the same order.
- keys = (t[0] for t in tuples)
- values = (t[1] for t in tuples)
- cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
- cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
-
- return CoreFoundation.CFDictionaryCreate(
- CoreFoundation.kCFAllocatorDefault,
- cf_keys,
- cf_values,
- dictionary_size,
- CoreFoundation.kCFTypeDictionaryKeyCallBacks,
- CoreFoundation.kCFTypeDictionaryValueCallBacks,
- )
-
-
-def _cf_string_to_unicode(value):
- """
- Creates a Unicode string from a CFString object. Used entirely for error
- reporting.
-
- Yes, it annoys me quite a lot that this function is this complex.
- """
- value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
-
- string = CoreFoundation.CFStringGetCStringPtr(
- value_as_void_p,
- CFConst.kCFStringEncodingUTF8
- )
- if string is None:
- buffer = ctypes.create_string_buffer(1024)
- result = CoreFoundation.CFStringGetCString(
- value_as_void_p,
- buffer,
- 1024,
- CFConst.kCFStringEncodingUTF8
- )
- if not result:
- raise OSError('Error copying C string from CFStringRef')
- string = buffer.value
- if string is not None:
- string = string.decode('utf-8')
- return string
-
-
-def _assert_no_error(error, exception_class=None):
- """
- Checks the return code and throws an exception if there is an error to
- report
- """
- if error == 0:
- return
-
- cf_error_string = Security.SecCopyErrorMessageString(error, None)
- output = _cf_string_to_unicode(cf_error_string)
- CoreFoundation.CFRelease(cf_error_string)
-
- if output is None or output == u'':
- output = u'OSStatus %s' % error
-
- if exception_class is None:
- exception_class = ssl.SSLError
-
- raise exception_class(output)
-
-
-def _cert_array_from_pem(pem_bundle):
- """
- Given a bundle of certs in PEM format, turns them into a CFArray of certs
- that can be used to validate a cert chain.
- """
- der_certs = [
- base64.b64decode(match.group(1))
- for match in _PEM_CERTS_RE.finditer(pem_bundle)
- ]
- if not der_certs:
- raise ssl.SSLError("No root certificates specified")
-
- cert_array = CoreFoundation.CFArrayCreateMutable(
- CoreFoundation.kCFAllocatorDefault,
- 0,
- ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks)
- )
- if not cert_array:
- raise ssl.SSLError("Unable to allocate memory!")
-
- try:
- for der_bytes in der_certs:
- certdata = _cf_data_from_bytes(der_bytes)
- if not certdata:
- raise ssl.SSLError("Unable to allocate memory!")
- cert = Security.SecCertificateCreateWithData(
- CoreFoundation.kCFAllocatorDefault, certdata
- )
- CoreFoundation.CFRelease(certdata)
- if not cert:
- raise ssl.SSLError("Unable to build cert object!")
-
- CoreFoundation.CFArrayAppendValue(cert_array, cert)
- CoreFoundation.CFRelease(cert)
-    except Exception:
-        # We need to free the array before the exception bubbles further.
-        # We only want to do that if an error occurs: otherwise, the caller
-        # should free.
-        CoreFoundation.CFRelease(cert_array)
-        raise
-
- return cert_array
-
-
-def _is_cert(item):
- """
- Returns True if a given CFTypeRef is a certificate.
- """
- expected = Security.SecCertificateGetTypeID()
- return CoreFoundation.CFGetTypeID(item) == expected
-
-
-def _is_identity(item):
- """
- Returns True if a given CFTypeRef is an identity.
- """
- expected = Security.SecIdentityGetTypeID()
- return CoreFoundation.CFGetTypeID(item) == expected
-
-
-def _temporary_keychain():
- """
- This function creates a temporary Mac keychain that we can use to work with
- credentials. This keychain uses a one-time password and a temporary file to
- store the data. We expect to have one keychain per socket. The returned
- SecKeychainRef must be freed by the caller, including calling
- SecKeychainDelete.
-
- Returns a tuple of the SecKeychainRef and the path to the temporary
- directory that contains it.
- """
- # Unfortunately, SecKeychainCreate requires a path to a keychain. This
- # means we cannot use mkstemp to use a generic temporary file. Instead,
- # we're going to create a temporary directory and a filename to use there.
- # This filename will be 8 random bytes expanded into base64. We also need
- # some random bytes to password-protect the keychain we're creating, so we
- # ask for 40 random bytes.
- random_bytes = os.urandom(40)
- filename = base64.b64encode(random_bytes[:8]).decode('utf-8')
- password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8
- tempdirectory = tempfile.mkdtemp()
-
- keychain_path = os.path.join(tempdirectory, filename).encode('utf-8')
-
- # We now want to create the keychain itself.
- keychain = Security.SecKeychainRef()
- status = Security.SecKeychainCreate(
- keychain_path,
- len(password),
- password,
- False,
- None,
- ctypes.byref(keychain)
- )
- _assert_no_error(status)
-
- # Having created the keychain, we want to pass it off to the caller.
- return keychain, tempdirectory
-
-
-def _load_items_from_file(keychain, path):
- """
- Given a single file, loads all the trust objects from it into arrays and
- the keychain.
- Returns a tuple of lists: the first list is a list of identities, the
- second a list of certs.
- """
- certificates = []
- identities = []
- result_array = None
-
- with open(path, 'rb') as f:
- raw_filedata = f.read()
-
- try:
- filedata = CoreFoundation.CFDataCreate(
- CoreFoundation.kCFAllocatorDefault,
- raw_filedata,
- len(raw_filedata)
- )
- result_array = CoreFoundation.CFArrayRef()
- result = Security.SecItemImport(
- filedata, # cert data
- None, # Filename, leaving it out for now
- None, # What the type of the file is, we don't care
- None, # what's in the file, we don't care
- 0, # import flags
- None, # key params, can include passphrase in the future
- keychain, # The keychain to insert into
- ctypes.byref(result_array) # Results
- )
- _assert_no_error(result)
-
- # A CFArray is not very useful to us as an intermediary
- # representation, so we are going to extract the objects we want
- # and then free the array. We don't need to keep hold of keys: the
- # keychain already has them!
- result_count = CoreFoundation.CFArrayGetCount(result_array)
- for index in range(result_count):
- item = CoreFoundation.CFArrayGetValueAtIndex(
- result_array, index
- )
- item = ctypes.cast(item, CoreFoundation.CFTypeRef)
-
- if _is_cert(item):
- CoreFoundation.CFRetain(item)
- certificates.append(item)
- elif _is_identity(item):
- CoreFoundation.CFRetain(item)
- identities.append(item)
- finally:
- if result_array:
- CoreFoundation.CFRelease(result_array)
-
- CoreFoundation.CFRelease(filedata)
-
- return (identities, certificates)
-
-
-def _load_client_cert_chain(keychain, *paths):
- """
- Load certificates and maybe keys from a number of files. Has the end goal
- of returning a CFArray containing one SecIdentityRef, and then zero or more
- SecCertificateRef objects, suitable for use as a client certificate trust
- chain.
- """
- # Ok, the strategy.
- #
- # This relies on knowing that macOS will not give you a SecIdentityRef
- # unless you have imported a key into a keychain. This is a somewhat
- # artificial limitation of macOS (for example, it doesn't necessarily
- # affect iOS), but there is nothing inside Security.framework that lets you
- # get a SecIdentityRef without having a key in a keychain.
- #
- # So the policy here is we take all the files and iterate them in order.
- # Each one will use SecItemImport to have one or more objects loaded from
- # it. We will also point at a keychain that macOS can use to work with the
- # private key.
- #
- # Once we have all the objects, we'll check what we actually have. If we
- # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
- # we'll take the first certificate (which we assume to be our leaf) and
- # ask the keychain to give us a SecIdentityRef with that cert's associated
- # key.
- #
- # We'll then return a CFArray containing the trust chain: one
- # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
- # responsibility for freeing this CFArray will be with the caller. This
- # CFArray must remain alive for the entire connection, so in practice it
- # will be stored with a single SSLSocket, along with the reference to the
- # keychain.
- certificates = []
- identities = []
-
- # Filter out bad paths.
- paths = (path for path in paths if path)
-
- try:
- for file_path in paths:
- new_identities, new_certs = _load_items_from_file(
- keychain, file_path
- )
- identities.extend(new_identities)
- certificates.extend(new_certs)
-
- # Ok, we have everything. The question is: do we have an identity? If
- # not, we want to grab one from the first cert we have.
- if not identities:
- new_identity = Security.SecIdentityRef()
- status = Security.SecIdentityCreateWithCertificate(
- keychain,
- certificates[0],
- ctypes.byref(new_identity)
- )
- _assert_no_error(status)
- identities.append(new_identity)
-
- # We now want to release the original certificate, as we no longer
- # need it.
- CoreFoundation.CFRelease(certificates.pop(0))
-
- # We now need to build a new CFArray that holds the trust chain.
- trust_chain = CoreFoundation.CFArrayCreateMutable(
- CoreFoundation.kCFAllocatorDefault,
- 0,
- ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
- )
- for item in itertools.chain(identities, certificates):
- # ArrayAppendValue does a CFRetain on the item. That's fine,
- # because the finally block will release our other refs to them.
- CoreFoundation.CFArrayAppendValue(trust_chain, item)
-
- return trust_chain
- finally:
- for obj in itertools.chain(identities, certificates):
- CoreFoundation.CFRelease(obj)
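
Most of the helpers above are CoreFoundation memory management, but the PEM handling in
_cert_array_from_pem() is plain Python: grab each base64 body with _PEM_CERTS_RE and
decode it to DER. A minimal sketch of just that step (the certificate bytes below are a
truncated placeholder, not a real certificate):

    import base64
    import re

    _PEM_CERTS_RE = re.compile(
        b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
    )

    def pem_bundle_to_der(pem_bundle):
        # Return the DER bytes of every certificate found in a PEM bundle.
        return [base64.b64decode(m.group(1)) for m in _PEM_CERTS_RE.finditer(pem_bundle)]

    bundle = (b"-----BEGIN CERTIFICATE-----\n"
              b"MIIBszCCAVmgAwIBAgIUIg==\n"
              b"-----END CERTIFICATE-----\n")
    print(len(pem_bundle_to_der(bundle)))  # -> 1
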
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
deleted file mode 100644
index e74589fa8..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/appengine.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-This module provides a pool manager that uses Google App Engine's
-`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
-
-Example usage::
-
- from urllib3 import PoolManager
- from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
-
- if is_appengine_sandbox():
- # AppEngineManager uses AppEngine's URLFetch API behind the scenes
- http = AppEngineManager()
- else:
- # PoolManager uses a socket-level API behind the scenes
- http = PoolManager()
-
- r = http.request('GET', 'https://google.com/')
-
-There are `limitations <https://cloud.google.com/appengine/docs/python/\
-urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
-the best choice for your application. There are three options for using
-urllib3 on Google App Engine:
-
-1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
- cost-effective in many circumstances as long as your usage is within the
- limitations.
-2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
- Sockets also have `limitations and restrictions
- <https://cloud.google.com/appengine/docs/python/sockets/\
- #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
- To use sockets, be sure to specify the following in your ``app.yaml``::
-
- env_variables:
- GAE_USE_SOCKETS_HTTPLIB : 'true'
-
-3. If you are using `App Engine Flexible
-<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
-:class:`PoolManager` without any configuration or special environment variables.
-"""
-
-from __future__ import absolute_import
-import logging
-import os
-import warnings
-from ..packages.six.moves.urllib.parse import urljoin
-
-from ..exceptions import (
- HTTPError,
- HTTPWarning,
- MaxRetryError,
- ProtocolError,
- TimeoutError,
- SSLError
-)
-
-from ..packages.six import BytesIO
-from ..request import RequestMethods
-from ..response import HTTPResponse
-from ..util.timeout import Timeout
-from ..util.retry import Retry
-
-try:
- from google.appengine.api import urlfetch
-except ImportError:
- urlfetch = None
-
-
-log = logging.getLogger(__name__)
-
-
-class AppEnginePlatformWarning(HTTPWarning):
- pass
-
-
-class AppEnginePlatformError(HTTPError):
- pass
-
-
-class AppEngineManager(RequestMethods):
- """
- Connection manager for Google App Engine sandbox applications.
-
- This manager uses the URLFetch service directly instead of using the
- emulated httplib, and is subject to URLFetch limitations as described in
- the App Engine documentation `here
- <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
-
-    Notably it will raise an :class:`AppEnginePlatformError` if:
-        * URLFetch is not available.
-        * You attempt to use this on App Engine Flexible, as full socket
-          support is available.
-        * A request size is more than 10 megabytes.
-        * A response size is more than 32 megabytes.
-        * You use an unsupported request method such as OPTIONS.
-
- Beyond those cases, it will raise normal urllib3 errors.
- """
-
- def __init__(self, headers=None, retries=None, validate_certificate=True,
- urlfetch_retries=True):
- if not urlfetch:
- raise AppEnginePlatformError(
- "URLFetch is not available in this environment.")
-
- if is_prod_appengine_mvms():
- raise AppEnginePlatformError(
-                "Use normal urllib3.PoolManager instead of AppEngineManager "
- "on Managed VMs, as using URLFetch is not necessary in "
- "this environment.")
-
- warnings.warn(
- "urllib3 is using URLFetch on Google App Engine sandbox instead "
- "of sockets. To use sockets directly instead of URLFetch see "
- "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
- AppEnginePlatformWarning)
-
- RequestMethods.__init__(self, headers)
- self.validate_certificate = validate_certificate
- self.urlfetch_retries = urlfetch_retries
-
- self.retries = retries or Retry.DEFAULT
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- # Return False to re-raise any potential exceptions
- return False
-
- def urlopen(self, method, url, body=None, headers=None,
- retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
- **response_kw):
-
- retries = self._get_retries(retries, redirect)
-
- try:
- follow_redirects = (
- redirect and
- retries.redirect != 0 and
- retries.total)
- response = urlfetch.fetch(
- url,
- payload=body,
- method=method,
- headers=headers or {},
- allow_truncated=False,
- follow_redirects=self.urlfetch_retries and follow_redirects,
- deadline=self._get_absolute_timeout(timeout),
- validate_certificate=self.validate_certificate,
- )
- except urlfetch.DeadlineExceededError as e:
- raise TimeoutError(self, e)
-
- except urlfetch.InvalidURLError as e:
- if 'too large' in str(e):
- raise AppEnginePlatformError(
- "URLFetch request too large, URLFetch only "
- "supports requests up to 10mb in size.", e)
- raise ProtocolError(e)
-
- except urlfetch.DownloadError as e:
- if 'Too many redirects' in str(e):
- raise MaxRetryError(self, url, reason=e)
- raise ProtocolError(e)
-
- except urlfetch.ResponseTooLargeError as e:
- raise AppEnginePlatformError(
-                "URLFetch response too large, URLFetch only supports "
- "responses up to 32mb in size.", e)
-
- except urlfetch.SSLCertificateError as e:
- raise SSLError(e)
-
- except urlfetch.InvalidMethodError as e:
- raise AppEnginePlatformError(
- "URLFetch does not support method: %s" % method, e)
-
- http_response = self._urlfetch_response_to_http_response(
- response, retries=retries, **response_kw)
-
- # Handle redirect?
- redirect_location = redirect and http_response.get_redirect_location()
- if redirect_location:
- # Check for redirect response
- if (self.urlfetch_retries and retries.raise_on_redirect):
- raise MaxRetryError(self, url, "too many redirects")
- else:
- if http_response.status == 303:
- method = 'GET'
-
- try:
- retries = retries.increment(method, url, response=http_response, _pool=self)
- except MaxRetryError:
- if retries.raise_on_redirect:
- raise MaxRetryError(self, url, "too many redirects")
- return http_response
-
- retries.sleep_for_retry(http_response)
- log.debug("Redirecting %s -> %s", url, redirect_location)
- redirect_url = urljoin(url, redirect_location)
- return self.urlopen(
- method, redirect_url, body, headers,
- retries=retries, redirect=redirect,
- timeout=timeout, **response_kw)
-
- # Check if we should retry the HTTP response.
- has_retry_after = bool(http_response.getheader('Retry-After'))
- if retries.is_retry(method, http_response.status, has_retry_after):
- retries = retries.increment(
- method, url, response=http_response, _pool=self)
- log.debug("Retry: %s", url)
- retries.sleep(http_response)
- return self.urlopen(
- method, url,
- body=body, headers=headers,
- retries=retries, redirect=redirect,
- timeout=timeout, **response_kw)
-
- return http_response
-
- def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
-
- if is_prod_appengine():
- # Production GAE handles deflate encoding automatically, but does
- # not remove the encoding header.
- content_encoding = urlfetch_resp.headers.get('content-encoding')
-
- if content_encoding == 'deflate':
- del urlfetch_resp.headers['content-encoding']
-
- transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
- # We have a full response's content,
- # so let's make sure we don't report ourselves as chunked data.
- if transfer_encoding == 'chunked':
- encodings = transfer_encoding.split(",")
- encodings.remove('chunked')
- urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
-
- return HTTPResponse(
- # In order for decoding to work, we must present the content as
- # a file-like object.
- body=BytesIO(urlfetch_resp.content),
- headers=urlfetch_resp.headers,
- status=urlfetch_resp.status_code,
- **response_kw
- )
-
- def _get_absolute_timeout(self, timeout):
- if timeout is Timeout.DEFAULT_TIMEOUT:
- return None # Defer to URLFetch's default.
- if isinstance(timeout, Timeout):
- if timeout._read is not None or timeout._connect is not None:
- warnings.warn(
- "URLFetch does not support granular timeout settings, "
- "reverting to total or default URLFetch timeout.",
- AppEnginePlatformWarning)
- return timeout.total
- return timeout
-
- def _get_retries(self, retries, redirect):
- if not isinstance(retries, Retry):
- retries = Retry.from_int(
- retries, redirect=redirect, default=self.retries)
-
- if retries.connect or retries.read or retries.redirect:
- warnings.warn(
- "URLFetch only supports total retries and does not "
- "recognize connect, read, or redirect retry parameters.",
- AppEnginePlatformWarning)
-
- return retries
-
-
-def is_appengine():
- return (is_local_appengine() or
- is_prod_appengine() or
- is_prod_appengine_mvms())
-
-
-def is_appengine_sandbox():
- return is_appengine() and not is_prod_appengine_mvms()
-
-
-def is_local_appengine():
- return ('APPENGINE_RUNTIME' in os.environ and
- 'Development/' in os.environ['SERVER_SOFTWARE'])
-
-
-def is_prod_appengine():
- return ('APPENGINE_RUNTIME' in os.environ and
- 'Google App Engine/' in os.environ['SERVER_SOFTWARE'] and
- not is_prod_appengine_mvms())
-
-
-def is_prod_appengine_mvms():
- return os.environ.get('GAE_VM', False) == 'true'
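
One detail worth calling out in the manager above is _get_retries(): callers may pass an
int, None, or a Retry object, and everything is normalised to a Retry before use. A
minimal sketch of that normalisation, assuming the standalone urllib3 package:

    from urllib3.util.retry import Retry

    default = Retry(total=3, redirect=2)

    for raw in (None, 5, Retry(total=1)):
        # from_int() promotes ints/None to Retry and passes Retry instances through.
        retries = Retry.from_int(raw, redirect=True, default=default)
        print(type(raw).__name__, '->', retries.total)   # NoneType -> 3, int -> 5, Retry -> 1
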
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
deleted file mode 100644
index 3f8c9ebf5..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/ntlmpool.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-NTLM authenticating pool, contributed by erikcederstran
-
-Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
-"""
-from __future__ import absolute_import
-
-from logging import getLogger
-from ntlm import ntlm
-
-from .. import HTTPSConnectionPool
-from ..packages.six.moves.http_client import HTTPSConnection
-
-
-log = getLogger(__name__)
-
-
-class NTLMConnectionPool(HTTPSConnectionPool):
- """
- Implements an NTLM authentication version of an urllib3 connection pool
- """
-
- scheme = 'https'
-
- def __init__(self, user, pw, authurl, *args, **kwargs):
- """
- authurl is a random URL on the server that is protected by NTLM.
- user is the Windows user, probably in the DOMAIN\\username format.
- pw is the password for the user.
- """
- super(NTLMConnectionPool, self).__init__(*args, **kwargs)
- self.authurl = authurl
- self.rawuser = user
- user_parts = user.split('\\', 1)
- self.domain = user_parts[0].upper()
- self.user = user_parts[1]
- self.pw = pw
-
- def _new_conn(self):
- # Performs the NTLM handshake that secures the connection. The socket
- # must be kept open while requests are performed.
- self.num_connections += 1
- log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s',
- self.num_connections, self.host, self.authurl)
-
- headers = {}
- headers['Connection'] = 'Keep-Alive'
- req_header = 'Authorization'
- resp_header = 'www-authenticate'
-
- conn = HTTPSConnection(host=self.host, port=self.port)
-
- # Send negotiation message
- headers[req_header] = (
- 'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
- log.debug('Request headers: %s', headers)
- conn.request('GET', self.authurl, None, headers)
- res = conn.getresponse()
- reshdr = dict(res.getheaders())
- log.debug('Response status: %s %s', res.status, res.reason)
- log.debug('Response headers: %s', reshdr)
- log.debug('Response data: %s [...]', res.read(100))
-
- # Remove the reference to the socket, so that it can not be closed by
- # the response object (we want to keep the socket open)
- res.fp = None
-
- # Server should respond with a challenge message
- auth_header_values = reshdr[resp_header].split(', ')
- auth_header_value = None
- for s in auth_header_values:
- if s[:5] == 'NTLM ':
- auth_header_value = s[5:]
- if auth_header_value is None:
- raise Exception('Unexpected %s response header: %s' %
- (resp_header, reshdr[resp_header]))
-
- # Send authentication message
- ServerChallenge, NegotiateFlags = \
- ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
- auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
- self.user,
- self.domain,
- self.pw,
- NegotiateFlags)
- headers[req_header] = 'NTLM %s' % auth_msg
- log.debug('Request headers: %s', headers)
- conn.request('GET', self.authurl, None, headers)
- res = conn.getresponse()
- log.debug('Response status: %s %s', res.status, res.reason)
- log.debug('Response headers: %s', dict(res.getheaders()))
- log.debug('Response data: %s [...]', res.read()[:100])
- if res.status != 200:
- if res.status == 401:
- raise Exception('Server rejected request: wrong '
- 'username or password')
- raise Exception('Wrong server response: %s %s' %
- (res.status, res.reason))
-
- res.fp = None
- log.debug('Connection established')
- return conn
-
- def urlopen(self, method, url, body=None, headers=None, retries=3,
- redirect=True, assert_same_host=True):
- if headers is None:
- headers = {}
- headers['Connection'] = 'Keep-Alive'
- return super(NTLMConnectionPool, self).urlopen(method, url, body,
- headers, retries,
- redirect,
- assert_same_host)
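
For readers wondering how the pool above was meant to be driven: it is constructed with
the NTLM credentials plus the usual HTTPSConnectionPool arguments, and the handshake
happens lazily on the first request. A minimal construction sketch, assuming a urllib3
1.x release that still ships contrib.ntlmpool, the python-ntlm package, and a
hypothetical NTLM-protected host:

    from urllib3.contrib.ntlmpool import NTLMConnectionPool

    pool = NTLMConnectionPool(
        user='EXAMPLE\\jdoe',          # DOMAIN\username, as the docstring requires
        pw='s3cret',
        authurl='/login',              # an NTLM-protected URL on the server
        host='intranet.example.com',   # hypothetical host
        port=443,
    )
    # pool.urlopen('GET', '/reports') would perform the NTLM handshake on first use.
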
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
deleted file mode 100644
index 8d373507d..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/pyopenssl.py
+++ /dev/null
@@ -1,458 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-SSL with SNI_-support for Python 2. Follow these instructions if you would
-like to verify SSL certificates in Python 2. Note, the default libraries do
-*not* do certificate checking; you need to do additional work to validate
-certificates yourself.
-
-This needs the following packages installed:
-
-* pyOpenSSL (tested with 16.0.0)
-* cryptography (minimum 1.3.4, from pyopenssl)
-* idna (minimum 2.0, from cryptography)
-
-However, pyopenssl depends on cryptography, which depends on idna, so while we
-use all three directly here we end up having relatively few packages required.
-
-You can install them with the following command:
-
- pip install pyopenssl cryptography idna
-
-To activate certificate checking, call
-:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
-before you begin making HTTP requests. This can be done in a ``sitecustomize``
-module, or at any other time before your application begins using ``urllib3``,
-like this::
-
- try:
- import urllib3.contrib.pyopenssl
- urllib3.contrib.pyopenssl.inject_into_urllib3()
- except ImportError:
- pass
-
-Now you can use :mod:`urllib3` as you normally would, and it will support SNI
-when the required modules are installed.
-
-Activating this module also has the positive side effect of disabling SSL/TLS
-compression in Python 2 (see `CRIME attack`_).
-
-If you want to configure the default list of supported cipher suites, you can
-set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
-
-.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
-.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
-"""
-from __future__ import absolute_import
-
-import OpenSSL.SSL
-from cryptography import x509
-from cryptography.hazmat.backends.openssl import backend as openssl_backend
-from cryptography.hazmat.backends.openssl.x509 import _Certificate
-
-from socket import timeout, error as SocketError
-from io import BytesIO
-
-try: # Platform-specific: Python 2
- from socket import _fileobject
-except ImportError: # Platform-specific: Python 3
- _fileobject = None
- from ..packages.backports.makefile import backport_makefile
-
-import logging
-import ssl
-
-try:
- import six
-except ImportError:
- from ..packages import six
-
-import sys
-
-from .. import util
-
-__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
-
-# SNI always works.
-HAS_SNI = True
-
-# Map from urllib3 to PyOpenSSL compatible parameter-values.
-_openssl_versions = {
- ssl.PROTOCOL_SSLv23: OpenSSL.SSL.SSLv23_METHOD,
- ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
-}
-
-if hasattr(ssl, 'PROTOCOL_TLSv1_1') and hasattr(OpenSSL.SSL, 'TLSv1_1_METHOD'):
- _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
-
-if hasattr(ssl, 'PROTOCOL_TLSv1_2') and hasattr(OpenSSL.SSL, 'TLSv1_2_METHOD'):
- _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
-
-try:
- _openssl_versions.update({ssl.PROTOCOL_SSLv3: OpenSSL.SSL.SSLv3_METHOD})
-except AttributeError:
- pass
-
-_stdlib_to_openssl_verify = {
- ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
- ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
- ssl.CERT_REQUIRED:
- OpenSSL.SSL.VERIFY_PEER + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
-}
-_openssl_to_stdlib_verify = dict(
- (v, k) for k, v in _stdlib_to_openssl_verify.items()
-)
-
-# OpenSSL will only write 16K at a time
-SSL_WRITE_BLOCKSIZE = 16384
-
-orig_util_HAS_SNI = util.HAS_SNI
-orig_util_SSLContext = util.ssl_.SSLContext
-
-
-log = logging.getLogger(__name__)
-
-
-def inject_into_urllib3():
- 'Monkey-patch urllib3 with PyOpenSSL-backed SSL-support.'
-
- _validate_dependencies_met()
-
- util.ssl_.SSLContext = PyOpenSSLContext
- util.HAS_SNI = HAS_SNI
- util.ssl_.HAS_SNI = HAS_SNI
- util.IS_PYOPENSSL = True
- util.ssl_.IS_PYOPENSSL = True
-
-
-def extract_from_urllib3():
- 'Undo monkey-patching by :func:`inject_into_urllib3`.'
-
- util.ssl_.SSLContext = orig_util_SSLContext
- util.HAS_SNI = orig_util_HAS_SNI
- util.ssl_.HAS_SNI = orig_util_HAS_SNI
- util.IS_PYOPENSSL = False
- util.ssl_.IS_PYOPENSSL = False
-
-
-def _validate_dependencies_met():
- """
- Verifies that PyOpenSSL's package-level dependencies have been met.
- Throws `ImportError` if they are not met.
- """
- # Method added in `cryptography==1.1`; not available in older versions
- from cryptography.x509.extensions import Extensions
- if getattr(Extensions, "get_extension_for_class", None) is None:
- raise ImportError("'cryptography' module missing required functionality. "
- "Try upgrading to v1.3.4 or newer.")
-
- # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
- # attribute is only present on those versions.
- from OpenSSL.crypto import X509
- x509 = X509()
- if getattr(x509, "_x509", None) is None:
- raise ImportError("'pyOpenSSL' module missing required functionality. "
- "Try upgrading to v0.14 or newer.")
-
-
-def _dnsname_to_stdlib(name):
- """
- Converts a dNSName SubjectAlternativeName field to the form used by the
- standard library on the given Python version.
-
- Cryptography produces a dNSName as a unicode string that was idna-decoded
- from ASCII bytes. We need to idna-encode that string to get it back, and
- then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
- uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
- """
- def idna_encode(name):
- """
- Borrowed wholesale from the Python Cryptography Project. It turns out
- that we can't just safely call `idna.encode`: it can explode for
- wildcard names. This avoids that problem.
- """
- import idna
-
- for prefix in [u'*.', u'.']:
- if name.startswith(prefix):
- name = name[len(prefix):]
- return prefix.encode('ascii') + idna.encode(name)
- return idna.encode(name)
-
- name = idna_encode(name)
- if sys.version_info >= (3, 0):
- name = name.decode('utf-8')
- return name
-
-
-def get_subj_alt_name(peer_cert):
- """
-    Given a PyOpenSSL certificate, provides all the subject alternative names.
- """
- # Pass the cert to cryptography, which has much better APIs for this.
- # This is technically using private APIs, but should work across all
- # relevant versions until PyOpenSSL gets something proper for this.
- cert = _Certificate(openssl_backend, peer_cert._x509)
-
- # We want to find the SAN extension. Ask Cryptography to locate it (it's
- # faster than looping in Python)
- try:
- ext = cert.extensions.get_extension_for_class(
- x509.SubjectAlternativeName
- ).value
- except x509.ExtensionNotFound:
- # No such extension, return the empty list.
- return []
- except (x509.DuplicateExtension, x509.UnsupportedExtension,
- x509.UnsupportedGeneralNameType, UnicodeError) as e:
- # A problem has been found with the quality of the certificate. Assume
- # no SAN field is present.
- log.warning(
- "A problem was encountered with the certificate that prevented "
- "urllib3 from finding the SubjectAlternativeName field. This can "
- "affect certificate validation. The error was %s",
- e,
- )
- return []
-
- # We want to return dNSName and iPAddress fields. We need to cast the IPs
- # back to strings because the match_hostname function wants them as
- # strings.
- # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
- # decoded. This is pretty frustrating, but that's what the standard library
- # does with certificates, and so we need to attempt to do the same.
- names = [
- ('DNS', _dnsname_to_stdlib(name))
- for name in ext.get_values_for_type(x509.DNSName)
- ]
- names.extend(
- ('IP Address', str(name))
- for name in ext.get_values_for_type(x509.IPAddress)
- )
-
- return names
-
-
-class WrappedSocket(object):
-    '''API-compatibility wrapper for PyOpenSSL's Connection class.
-
- Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
- collector of pypy.
- '''
-
- def __init__(self, connection, socket, suppress_ragged_eofs=True):
- self.connection = connection
- self.socket = socket
- self.suppress_ragged_eofs = suppress_ragged_eofs
- self._makefile_refs = 0
- self._closed = False
-
- def fileno(self):
- return self.socket.fileno()
-
- # Copy-pasted from Python 3.5 source code
- def _decref_socketios(self):
- if self._makefile_refs > 0:
- self._makefile_refs -= 1
- if self._closed:
- self.close()
-
- def recv(self, *args, **kwargs):
- try:
- data = self.connection.recv(*args, **kwargs)
- except OpenSSL.SSL.SysCallError as e:
- if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
- return b''
- else:
- raise SocketError(str(e))
- except OpenSSL.SSL.ZeroReturnError as e:
- if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
- return b''
- else:
- raise
- except OpenSSL.SSL.WantReadError:
- rd = util.wait_for_read(self.socket, self.socket.gettimeout())
- if not rd:
- raise timeout('The read operation timed out')
- else:
- return self.recv(*args, **kwargs)
- else:
- return data
-
- def recv_into(self, *args, **kwargs):
- try:
- return self.connection.recv_into(*args, **kwargs)
- except OpenSSL.SSL.SysCallError as e:
- if self.suppress_ragged_eofs and e.args == (-1, 'Unexpected EOF'):
- return 0
- else:
- raise SocketError(str(e))
- except OpenSSL.SSL.ZeroReturnError as e:
- if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
- return 0
- else:
- raise
- except OpenSSL.SSL.WantReadError:
- rd = util.wait_for_read(self.socket, self.socket.gettimeout())
- if not rd:
- raise timeout('The read operation timed out')
- else:
- return self.recv_into(*args, **kwargs)
-
- def settimeout(self, timeout):
- return self.socket.settimeout(timeout)
-
- def _send_until_done(self, data):
- while True:
- try:
- return self.connection.send(data)
- except OpenSSL.SSL.WantWriteError:
- wr = util.wait_for_write(self.socket, self.socket.gettimeout())
- if not wr:
- raise timeout()
- continue
- except OpenSSL.SSL.SysCallError as e:
- raise SocketError(str(e))
-
- def sendall(self, data):
- total_sent = 0
- while total_sent < len(data):
- sent = self._send_until_done(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
- total_sent += sent
-
- def shutdown(self):
- # FIXME rethrow compatible exceptions should we ever use this
- self.connection.shutdown()
-
- def close(self):
- if self._makefile_refs < 1:
- try:
- self._closed = True
- return self.connection.close()
- except OpenSSL.SSL.Error:
- return
- else:
- self._makefile_refs -= 1
-
- def getpeercert(self, binary_form=False):
- x509 = self.connection.get_peer_certificate()
-
- if not x509:
- return x509
-
- if binary_form:
- return OpenSSL.crypto.dump_certificate(
- OpenSSL.crypto.FILETYPE_ASN1,
- x509)
-
- return {
- 'subject': (
- (('commonName', x509.get_subject().CN),),
- ),
- 'subjectAltName': get_subj_alt_name(x509)
- }
-
- def _reuse(self):
- self._makefile_refs += 1
-
- def _drop(self):
- if self._makefile_refs < 1:
- self.close()
- else:
- self._makefile_refs -= 1
-
-
-if _fileobject: # Platform-specific: Python 2
- def makefile(self, mode, bufsize=-1):
- self._makefile_refs += 1
- return _fileobject(self, mode, bufsize, close=True)
-else: # Platform-specific: Python 3
- makefile = backport_makefile
-
-WrappedSocket.makefile = makefile
-
-
-class PyOpenSSLContext(object):
- """
- I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
- for translating the interface of the standard library ``SSLContext`` object
- to calls into PyOpenSSL.
- """
- def __init__(self, protocol):
- self.protocol = _openssl_versions[protocol]
- self._ctx = OpenSSL.SSL.Context(self.protocol)
- self._options = 0
- self.check_hostname = False
-
- @property
- def options(self):
- return self._options
-
- @options.setter
- def options(self, value):
- self._options = value
- self._ctx.set_options(value)
-
- @property
- def verify_mode(self):
- return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
-
- @verify_mode.setter
- def verify_mode(self, value):
- self._ctx.set_verify(
- _stdlib_to_openssl_verify[value],
- _verify_callback
- )
-
- def set_default_verify_paths(self):
- self._ctx.set_default_verify_paths()
-
- def set_ciphers(self, ciphers):
- if isinstance(ciphers, six.text_type):
- ciphers = ciphers.encode('utf-8')
- self._ctx.set_cipher_list(ciphers)
-
- def load_verify_locations(self, cafile=None, capath=None, cadata=None):
- if cafile is not None:
- cafile = cafile.encode('utf-8')
- if capath is not None:
- capath = capath.encode('utf-8')
- self._ctx.load_verify_locations(cafile, capath)
- if cadata is not None:
- self._ctx.load_verify_locations(BytesIO(cadata))
-
- def load_cert_chain(self, certfile, keyfile=None, password=None):
- self._ctx.use_certificate_file(certfile)
- if password is not None:
- self._ctx.set_passwd_cb(lambda max_length, prompt_twice, userdata: password)
- self._ctx.use_privatekey_file(keyfile or certfile)
-
- def wrap_socket(self, sock, server_side=False,
- do_handshake_on_connect=True, suppress_ragged_eofs=True,
- server_hostname=None):
- cnx = OpenSSL.SSL.Connection(self._ctx, sock)
-
- if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
- server_hostname = server_hostname.encode('utf-8')
-
- if server_hostname is not None:
- cnx.set_tlsext_host_name(server_hostname)
-
- cnx.set_connect_state()
-
- while True:
- try:
- cnx.do_handshake()
- except OpenSSL.SSL.WantReadError:
- rd = util.wait_for_read(sock, sock.gettimeout())
- if not rd:
- raise timeout('select timed out')
- continue
- except OpenSSL.SSL.Error as e:
- raise ssl.SSLError('bad handshake: %r' % e)
- break
-
- return WrappedSocket(cnx, sock)
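In practice this context class was not instantiated directly; the module's inject_into_urllib3() helper (defined near the top of the file, outside this hunk) swaps it in as urllib3's SSLContext. A sketch of that intended usage against an upstream urllib3 1.x installation, shown here only as an assumption since the vendored copy is being removed::

    import urllib3
    from urllib3.contrib import pyopenssl

    pyopenssl.inject_into_urllib3()        # urllib3 now builds PyOpenSSLContext objects
    http = urllib3.PoolManager()
    r = http.request('GET', 'https://example.org/')
    print(r.status)
    pyopenssl.extract_from_urllib3()       # restore the stdlib-backed SSLContext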
-
-
-def _verify_callback(cnx, x509, err_no, err_depth, return_code):
- return err_no == 0
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
deleted file mode 100644
index fcc30118c..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/securetransport.py
+++ /dev/null
@@ -1,808 +0,0 @@
-# SPDX-License-Identifier: MIT
-"""
-SecureTransport support for urllib3 via ctypes.
-
-This makes platform-native TLS available to urllib3 users on macOS without the
-use of a compiler. This is an important feature because the Python Package
-Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
-that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
-this is to give macOS users an alternative solution to the problem, and that
-solution is to use SecureTransport.
-
-We use ctypes here because this solution must not require a compiler. That's
-because pip is not allowed to require a compiler either.
-
-This is not intended to be a seriously long-term solution to this problem.
-The hope is that PEP 543 will eventually solve this issue for us, at which
-point we can retire this contrib module. But in the short term, we need to
-solve the impending tire fire that is Python on Mac without this kind of
-contrib module. So...here we are.
-
-To use this module, simply import and inject it::
-
- import urllib3.contrib.securetransport
- urllib3.contrib.securetransport.inject_into_urllib3()
-
-Happy TLSing!
-"""
-from __future__ import absolute_import
-
-import contextlib
-import ctypes
-import errno
-import os.path
-import shutil
-import socket
-import ssl
-import threading
-import weakref
-
-from .. import util
-from ._securetransport.bindings import (
- Security, SecurityConst, CoreFoundation
-)
-from ._securetransport.low_level import (
- _assert_no_error, _cert_array_from_pem, _temporary_keychain,
- _load_client_cert_chain
-)
-
-try: # Platform-specific: Python 2
- from socket import _fileobject
-except ImportError: # Platform-specific: Python 3
- _fileobject = None
- from ..packages.backports.makefile import backport_makefile
-
-try:
- memoryview(b'')
-except NameError:
- raise ImportError("SecureTransport only works on Pythons with memoryview")
-
-__all__ = ['inject_into_urllib3', 'extract_from_urllib3']
-
-# SNI always works
-HAS_SNI = True
-
-orig_util_HAS_SNI = util.HAS_SNI
-orig_util_SSLContext = util.ssl_.SSLContext
-
-# This dictionary is used by the read callback to obtain a handle to the
-# calling wrapped socket. This is a pretty silly approach, but for now it'll
-# do. I feel like I should be able to smuggle a handle to the wrapped socket
-# directly in the SSLConnectionRef, but for now this approach will work I
-# guess.
-#
-# We need to lock around this structure for inserts, but we don't do it for
-# reads/writes in the callbacks. The reasoning here goes as follows:
-#
-# 1. It is not possible to call into the callbacks before the dictionary is
-# populated, so once in the callback the id must be in the dictionary.
-# 2. The callbacks don't mutate the dictionary, they only read from it, and
-# so cannot conflict with any of the insertions.
-#
-# This is good: if we had to lock in the callbacks we'd drastically slow down
-# the performance of this code.
-_connection_refs = weakref.WeakValueDictionary()
-_connection_ref_lock = threading.Lock()
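The comment above describes a registry that maps an integer handle to the owning wrapped socket so that C-level callbacks can find their Python object again. The same pattern, reduced to plain stdlib code with invented names, looks like this::

    import threading
    import weakref

    _refs = weakref.WeakValueDictionary()
    _refs_lock = threading.Lock()

    class Conn(object):
        pass

    def register(obj):
        # Insertions are locked; callback-side reads are not, for the reasons above.
        with _refs_lock:
            handle = id(obj) % 2147483647
            while handle in _refs:
                handle = (handle + 1) % 2147483647
            _refs[handle] = obj
        return handle

    def callback(handle):
        return _refs.get(handle)           # lock-free read from the callback

    conn = Conn()
    h = register(conn)
    assert callback(h) is conn
    del conn    # weak reference: on CPython the entry disappears with the object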
-
-# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
-# for no better reason than we need *a* limit, and this one is right there.
-SSL_WRITE_BLOCKSIZE = 16384
-
-# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
-# individual cipher suites. We need to do this because this is how
-# SecureTransport wants them.
-CIPHER_SUITES = [
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_DHE_DSS_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_DHE_DSS_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
- SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
- SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
- SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
- SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
- SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
- SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
- SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
- SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
-]
-
-# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a minimum of
-# TLSv1 and a maximum of TLSv1.2. For everything else, we pin to that version.
-_protocol_to_min_max = {
- ssl.PROTOCOL_SSLv23: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
-}
-
-if hasattr(ssl, "PROTOCOL_SSLv2"):
- _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
- SecurityConst.kSSLProtocol2, SecurityConst.kSSLProtocol2
- )
-if hasattr(ssl, "PROTOCOL_SSLv3"):
- _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
- SecurityConst.kSSLProtocol3, SecurityConst.kSSLProtocol3
- )
-if hasattr(ssl, "PROTOCOL_TLSv1"):
- _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
- SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol1
- )
-if hasattr(ssl, "PROTOCOL_TLSv1_1"):
- _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
- SecurityConst.kTLSProtocol11, SecurityConst.kTLSProtocol11
- )
-if hasattr(ssl, "PROTOCOL_TLSv1_2"):
- _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
- SecurityConst.kTLSProtocol12, SecurityConst.kTLSProtocol12
- )
-if hasattr(ssl, "PROTOCOL_TLS"):
- _protocol_to_min_max[ssl.PROTOCOL_TLS] = _protocol_to_min_max[ssl.PROTOCOL_SSLv23]
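The table above resolves a stdlib protocol constant to a (minimum, maximum) pair that the handshake later feeds to SSLSetProtocolVersionMin/Max. The lookup itself is ordinary dictionary code; an illustrative stand-alone version with placeholder values for the SecureTransport constants::

    import ssl

    TLS1, TLS1_2 = 4, 8                      # stand-ins for kTLSProtocol1 / kTLSProtocol12

    protocol_to_min_max = {ssl.PROTOCOL_SSLv23: (TLS1, TLS1_2)}
    if hasattr(ssl, 'PROTOCOL_TLS'):
        protocol_to_min_max[ssl.PROTOCOL_TLS] = protocol_to_min_max[ssl.PROTOCOL_SSLv23]

    min_v, max_v = protocol_to_min_max[ssl.PROTOCOL_SSLv23]
    print(min_v, max_v)                      # the pair later pinned on the TLS context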
-
-
-def inject_into_urllib3():
- """
- Monkey-patch urllib3 with SecureTransport-backed SSL-support.
- """
- util.ssl_.SSLContext = SecureTransportContext
- util.HAS_SNI = HAS_SNI
- util.ssl_.HAS_SNI = HAS_SNI
- util.IS_SECURETRANSPORT = True
- util.ssl_.IS_SECURETRANSPORT = True
-
-
-def extract_from_urllib3():
- """
- Undo monkey-patching by :func:`inject_into_urllib3`.
- """
- util.ssl_.SSLContext = orig_util_SSLContext
- util.HAS_SNI = orig_util_HAS_SNI
- util.ssl_.HAS_SNI = orig_util_HAS_SNI
- util.IS_SECURETRANSPORT = False
- util.ssl_.IS_SECURETRANSPORT = False
-
-
-def _read_callback(connection_id, data_buffer, data_length_pointer):
- """
- SecureTransport read callback. This is called by ST to request that data
- be returned from the socket.
- """
- wrapped_socket = None
- try:
- wrapped_socket = _connection_refs.get(connection_id)
- if wrapped_socket is None:
- return SecurityConst.errSSLInternal
- base_socket = wrapped_socket.socket
-
- requested_length = data_length_pointer[0]
-
- timeout = wrapped_socket.gettimeout()
- error = None
- read_count = 0
- buffer = (ctypes.c_char * requested_length).from_address(data_buffer)
- buffer_view = memoryview(buffer)
-
- try:
- while read_count < requested_length:
- if timeout is None or timeout >= 0:
- readables = util.wait_for_read([base_socket], timeout)
- if not readables:
- raise socket.error(errno.EAGAIN, 'timed out')
-
- # We need to tell ctypes that we have a buffer that can be
- # written to. Upsettingly, we do that like this:
- chunk_size = base_socket.recv_into(
- buffer_view[read_count:requested_length]
- )
- read_count += chunk_size
- if not chunk_size:
- if not read_count:
- return SecurityConst.errSSLClosedGraceful
- break
- except (socket.error) as e:
- error = e.errno
-
- if error is not None and error != errno.EAGAIN:
- if error == errno.ECONNRESET:
- return SecurityConst.errSSLClosedAbort
- raise
-
- data_length_pointer[0] = read_count
-
- if read_count != requested_length:
- return SecurityConst.errSSLWouldBlock
-
- return 0
- except Exception as e:
- if wrapped_socket is not None:
- wrapped_socket._exception = e
- return SecurityConst.errSSLInternal
-
-
-def _write_callback(connection_id, data_buffer, data_length_pointer):
- """
- SecureTransport write callback. This is called by ST to request that data
- actually be sent on the network.
- """
- wrapped_socket = None
- try:
- wrapped_socket = _connection_refs.get(connection_id)
- if wrapped_socket is None:
- return SecurityConst.errSSLInternal
- base_socket = wrapped_socket.socket
-
- bytes_to_write = data_length_pointer[0]
- data = ctypes.string_at(data_buffer, bytes_to_write)
-
- timeout = wrapped_socket.gettimeout()
- error = None
- sent = 0
-
- try:
- while sent < bytes_to_write:
- if timeout is None or timeout >= 0:
- writables = util.wait_for_write([base_socket], timeout)
- if not writables:
- raise socket.error(errno.EAGAIN, 'timed out')
- chunk_sent = base_socket.send(data)
- sent += chunk_sent
-
- # This has some needless copying here, but I'm not sure there's
- # much value in optimising this data path.
- data = data[chunk_sent:]
- except (socket.error) as e:
- error = e.errno
-
- if error is not None and error != errno.EAGAIN:
- if error == errno.ECONNRESET:
- return SecurityConst.errSSLClosedAbort
- raise
-
- data_length_pointer[0] = sent
- if sent != bytes_to_write:
- return SecurityConst.errSSLWouldBlock
-
- return 0
- except Exception as e:
- if wrapped_socket is not None:
- wrapped_socket._exception = e
- return SecurityConst.errSSLInternal
-
-
-# We need to keep these two objects references alive: if they get GC'd while
-# in use then SecureTransport could attempt to call a function that is in freed
-# memory. That would be...uh...bad. Yeah, that's the word. Bad.
-_read_callback_pointer = Security.SSLReadFunc(_read_callback)
-_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
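Keeping module-level references to ctypes callbacks is the standard defence against the failure mode described above: if the Python wrapper is collected, the C side is left calling freed memory. A self-contained illustration of the pattern with a made-up signature::

    import ctypes

    # A callback type for int(*)(int), purely for illustration.
    CALLBACK = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int)

    def _double(x):
        return x * 2

    # Bind the wrapper to a module-level name so it outlives any C code that
    # might still hold the function pointer, as the two pointers above do.
    _double_pointer = CALLBACK(_double)
    print(_double_pointer(21))               # 42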
-
-
-class WrappedSocket(object):
- """
- API-compatibility wrapper for Python's ssl-wrapped socket object.
-
- Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
- collector of PyPy.
- """
- def __init__(self, socket):
- self.socket = socket
- self.context = None
- self._makefile_refs = 0
- self._closed = False
- self._exception = None
- self._keychain = None
- self._keychain_dir = None
- self._client_cert_chain = None
-
- # We save off the previously-configured timeout and then set it to
- # zero. This is done because we use select and friends to handle the
- # timeouts, but if we leave the timeout set on the lower socket then
- # Python will "kindly" call select on that socket again for us. Avoid
- # that by forcing the timeout to zero.
- self._timeout = self.socket.gettimeout()
- self.socket.settimeout(0)
-
- @contextlib.contextmanager
- def _raise_on_error(self):
- """
- A context manager that can be used to wrap calls that do I/O from
- SecureTransport. If any of the I/O callbacks hit an exception, this
- context manager will correctly propagate the exception after the fact.
- This avoids silently swallowing those exceptions.
-
- It also correctly forces the socket closed.
- """
- self._exception = None
-
- # We explicitly don't catch around this yield because in the unlikely
- # event that an exception was hit in the block we don't want to swallow
- # it.
- yield
- if self._exception is not None:
- exception, self._exception = self._exception, None
- self.close()
- raise exception
-
- def _set_ciphers(self):
- """
- Sets up the allowed ciphers. By default this matches the set in
- util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. The list is
- hard-coded and cannot be changed at this time, mostly because parsing
- OpenSSL cipher strings is going to be a freaking nightmare.
- """
- ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
- result = Security.SSLSetEnabledCiphers(
- self.context, ciphers, len(CIPHER_SUITES)
- )
- _assert_no_error(result)
-
- def _custom_validate(self, verify, trust_bundle):
- """
- Called when we have set custom validation. We do this in two cases:
- first, when cert validation is entirely disabled; and second, when
- using a custom trust DB.
- """
- # If we disabled cert validation, just say: cool.
- if not verify:
- return
-
- # We want data in memory, so load it up.
- if os.path.isfile(trust_bundle):
- with open(trust_bundle, 'rb') as f:
- trust_bundle = f.read()
-
- cert_array = None
- trust = Security.SecTrustRef()
-
- try:
- # Get a CFArray that contains the certs we want.
- cert_array = _cert_array_from_pem(trust_bundle)
-
- # Ok, now the hard part. We want to get the SecTrustRef that ST has
- # created for this connection, shove our CAs into it, tell ST to
- # ignore everything else it knows, and then ask if it can build a
- # chain. This is a buuuunch of code.
- result = Security.SSLCopyPeerTrust(
- self.context, ctypes.byref(trust)
- )
- _assert_no_error(result)
- if not trust:
- raise ssl.SSLError("Failed to copy trust reference")
-
- result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
- _assert_no_error(result)
-
- result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
- _assert_no_error(result)
-
- trust_result = Security.SecTrustResultType()
- result = Security.SecTrustEvaluate(
- trust, ctypes.byref(trust_result)
- )
- _assert_no_error(result)
- finally:
- if trust:
- CoreFoundation.CFRelease(trust)
-
- if cert_array is not None:
- CoreFoundation.CFRelease(cert_array)
-
- # Ok, now we can look at what the result was.
- successes = (
- SecurityConst.kSecTrustResultUnspecified,
- SecurityConst.kSecTrustResultProceed
- )
- if trust_result.value not in successes:
- raise ssl.SSLError(
- "certificate verify failed, error code: %d" %
- trust_result.value
- )
-
- def handshake(self,
- server_hostname,
- verify,
- trust_bundle,
- min_version,
- max_version,
- client_cert,
- client_key,
- client_key_passphrase):
- """
- Actually performs the TLS handshake. This is run automatically by
- wrapped socket, and shouldn't be needed in user code.
- """
- # First, we do the initial bits of connection setup. We need to create
- # a context, set its I/O funcs, and set the connection reference.
- self.context = Security.SSLCreateContext(
- None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
- )
- result = Security.SSLSetIOFuncs(
- self.context, _read_callback_pointer, _write_callback_pointer
- )
- _assert_no_error(result)
-
- # Here we need to compute the handle to use. We do this by taking the
- # id of self modulo 2**31 - 1. If this is already in the dictionary, we
- # just keep incrementing by one until we find a free space.
- with _connection_ref_lock:
- handle = id(self) % 2147483647
- while handle in _connection_refs:
- handle = (handle + 1) % 2147483647
- _connection_refs[handle] = self
-
- result = Security.SSLSetConnection(self.context, handle)
- _assert_no_error(result)
-
- # If we have a server hostname, we should set that too.
- if server_hostname:
- if not isinstance(server_hostname, bytes):
- server_hostname = server_hostname.encode('utf-8')
-
- result = Security.SSLSetPeerDomainName(
- self.context, server_hostname, len(server_hostname)
- )
- _assert_no_error(result)
-
- # Setup the ciphers.
- self._set_ciphers()
-
- # Set the minimum and maximum TLS versions.
- result = Security.SSLSetProtocolVersionMin(self.context, min_version)
- _assert_no_error(result)
- result = Security.SSLSetProtocolVersionMax(self.context, max_version)
- _assert_no_error(result)
-
- # If there's a trust DB, we need to use it. We do that by telling
- # SecureTransport to break on server auth. We also do that if we don't
- # want to validate the certs at all: we just won't actually do any
- # authing in that case.
- if not verify or trust_bundle is not None:
- result = Security.SSLSetSessionOption(
- self.context,
- SecurityConst.kSSLSessionOptionBreakOnServerAuth,
- True
- )
- _assert_no_error(result)
-
- # If there's a client cert, we need to use it.
- if client_cert:
- self._keychain, self._keychain_dir = _temporary_keychain()
- self._client_cert_chain = _load_client_cert_chain(
- self._keychain, client_cert, client_key
- )
- result = Security.SSLSetCertificate(
- self.context, self._client_cert_chain
- )
- _assert_no_error(result)
-
- while True:
- with self._raise_on_error():
- result = Security.SSLHandshake(self.context)
-
- if result == SecurityConst.errSSLWouldBlock:
- raise socket.timeout("handshake timed out")
- elif result == SecurityConst.errSSLServerAuthCompleted:
- self._custom_validate(verify, trust_bundle)
- continue
- else:
- _assert_no_error(result)
- break
-
- def fileno(self):
- return self.socket.fileno()
-
- # Copy-pasted from Python 3.5 source code
- def _decref_socketios(self):
- if self._makefile_refs > 0:
- self._makefile_refs -= 1
- if self._closed:
- self.close()
-
- def recv(self, bufsiz):
- buffer = ctypes.create_string_buffer(bufsiz)
- bytes_read = self.recv_into(buffer, bufsiz)
- data = buffer[:bytes_read]
- return data
-
- def recv_into(self, buffer, nbytes=None):
- # Read short on EOF.
- if self._closed:
- return 0
-
- if nbytes is None:
- nbytes = len(buffer)
-
- buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
- processed_bytes = ctypes.c_size_t(0)
-
- with self._raise_on_error():
- result = Security.SSLRead(
- self.context, buffer, nbytes, ctypes.byref(processed_bytes)
- )
-
- # There are some result codes that we want to treat as "not always
- # errors". Specifically, those are errSSLWouldBlock,
- # errSSLClosedGraceful, and errSSLClosedNoNotify.
- if (result == SecurityConst.errSSLWouldBlock):
- # If we didn't process any bytes, then this was just a time out.
- # However, we can get errSSLWouldBlock in situations when we *did*
- # read some data, and in those cases we should just read "short"
- # and return.
- if processed_bytes.value == 0:
- # Timed out, no data read.
- raise socket.timeout("recv timed out")
- elif result in (SecurityConst.errSSLClosedGraceful, SecurityConst.errSSLClosedNoNotify):
- # The remote peer has closed this connection. We should do so as
- # well. Note that we don't actually return here because in
- # principle this could actually be fired along with return data.
- # It's unlikely though.
- self.close()
- else:
- _assert_no_error(result)
-
- # Ok, we read and probably succeeded. We should return whatever data
- # was actually read.
- return processed_bytes.value
-
- def settimeout(self, timeout):
- self._timeout = timeout
-
- def gettimeout(self):
- return self._timeout
-
- def send(self, data):
- processed_bytes = ctypes.c_size_t(0)
-
- with self._raise_on_error():
- result = Security.SSLWrite(
- self.context, data, len(data), ctypes.byref(processed_bytes)
- )
-
- if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
- # Timed out
- raise socket.timeout("send timed out")
- else:
- _assert_no_error(result)
-
- # We sent, and probably succeeded. Tell them how much we sent.
- return processed_bytes.value
-
- def sendall(self, data):
- total_sent = 0
- while total_sent < len(data):
- sent = self.send(data[total_sent:total_sent + SSL_WRITE_BLOCKSIZE])
- total_sent += sent
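sendall() above never hands SSLWrite more than SSL_WRITE_BLOCKSIZE bytes at once and simply loops over partial writes. The same chunking pattern in isolation, with a stand-in send function::

    SSL_WRITE_BLOCKSIZE = 16384

    def sendall(send, data, blocksize=SSL_WRITE_BLOCKSIZE):
        total_sent = 0
        while total_sent < len(data):
            total_sent += send(data[total_sent:total_sent + blocksize])
        return total_sent

    chunks = []
    sent = sendall(lambda piece: chunks.append(piece) or len(piece), b'x' * 40000)
    assert sent == 40000 and len(chunks) == 3    # 16384 + 16384 + 7232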
-
- def shutdown(self):
- with self._raise_on_error():
- Security.SSLClose(self.context)
-
- def close(self):
- # TODO: should I do clean shutdown here? Do I have to?
- if self._makefile_refs < 1:
- self._closed = True
- if self.context:
- CoreFoundation.CFRelease(self.context)
- self.context = None
- if self._client_cert_chain:
- CoreFoundation.CFRelease(self._client_cert_chain)
- self._client_cert_chain = None
- if self._keychain:
- Security.SecKeychainDelete(self._keychain)
- CoreFoundation.CFRelease(self._keychain)
- shutil.rmtree(self._keychain_dir)
- self._keychain = self._keychain_dir = None
- return self.socket.close()
- else:
- self._makefile_refs -= 1
-
- def getpeercert(self, binary_form=False):
- # Urgh, annoying.
- #
- # Here's how we do this:
- #
- # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
- # connection.
- # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
- # 3. To get the CN, call SecCertificateCopyCommonName and process that
- # string so that it's of the appropriate type.
- # 4. To get the SAN, we need to do something a bit more complex:
- # a. Call SecCertificateCopyValues to get the data, requesting
- # kSecOIDSubjectAltName.
- # b. Mess about with this dictionary to try to get the SANs out.
- #
- # This is gross. Really gross. It's going to be a few hundred LoC extra
- # just to repeat something that SecureTransport can *already do*. So my
- # operating assumption at this time is that what we want to do is
- # instead to just flag to urllib3 that it shouldn't do its own hostname
- # validation when using SecureTransport.
- if not binary_form:
- raise ValueError(
- "SecureTransport only supports dumping binary certs"
- )
- trust = Security.SecTrustRef()
- certdata = None
- der_bytes = None
-
- try:
- # Grab the trust store.
- result = Security.SSLCopyPeerTrust(
- self.context, ctypes.byref(trust)
- )
- _assert_no_error(result)
- if not trust:
- # Probably we haven't done the handshake yet. No biggie.
- return None
-
- cert_count = Security.SecTrustGetCertificateCount(trust)
- if not cert_count:
- # Also a case that might happen if we haven't handshaked.
- # Handshook? Handshaken?
- return None
-
- leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
- assert leaf
-
- # Ok, now we want the DER bytes.
- certdata = Security.SecCertificateCopyData(leaf)
- assert certdata
-
- data_length = CoreFoundation.CFDataGetLength(certdata)
- data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
- der_bytes = ctypes.string_at(data_buffer, data_length)
- finally:
- if certdata:
- CoreFoundation.CFRelease(certdata)
- if trust:
- CoreFoundation.CFRelease(trust)
-
- return der_bytes
-
- def _reuse(self):
- self._makefile_refs += 1
-
- def _drop(self):
- if self._makefile_refs < 1:
- self.close()
- else:
- self._makefile_refs -= 1
-
-
-if _fileobject: # Platform-specific: Python 2
- def makefile(self, mode, bufsize=-1):
- self._makefile_refs += 1
- return _fileobject(self, mode, bufsize, close=True)
-else: # Platform-specific: Python 3
- def makefile(self, mode="r", buffering=None, *args, **kwargs):
- # We disable buffering with SecureTransport because it conflicts with
- # the buffering that ST does internally (see issue #1153 for more).
- buffering = 0
- return backport_makefile(self, mode, buffering, *args, **kwargs)
-
-WrappedSocket.makefile = makefile
-
-
-class SecureTransportContext(object):
- """
- I am a wrapper class for the SecureTransport library, to translate the
- interface of the standard library ``SSLContext`` object to calls into
- SecureTransport.
- """
- def __init__(self, protocol):
- self._min_version, self._max_version = _protocol_to_min_max[protocol]
- self._options = 0
- self._verify = False
- self._trust_bundle = None
- self._client_cert = None
- self._client_key = None
- self._client_key_passphrase = None
-
- @property
- def check_hostname(self):
- """
- SecureTransport cannot have its hostname checking disabled. For more,
- see the comment on getpeercert() in this file.
- """
- return True
-
- @check_hostname.setter
- def check_hostname(self, value):
- """
- SecureTransport cannot have its hostname checking disabled. For more,
- see the comment on getpeercert() in this file.
- """
- pass
-
- @property
- def options(self):
- # TODO: Well, crap.
- #
- # So this is the bit of the code that is the most likely to cause us
- # trouble. Essentially we need to enumerate all of the SSL options that
- # users might want to use and try to see if we can sensibly translate
- # them, or whether we should just ignore them.
- return self._options
-
- @options.setter
- def options(self, value):
- # TODO: Update in line with above.
- self._options = value
-
- @property
- def verify_mode(self):
- return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
-
- @verify_mode.setter
- def verify_mode(self, value):
- self._verify = True if value == ssl.CERT_REQUIRED else False
-
- def set_default_verify_paths(self):
- # So, this has to do something a bit weird. Specifically, what it does
- # is nothing.
- #
- # This means that, if we had previously had load_verify_locations
- # called, this does not undo that. We need to do that because it turns
- # out that the rest of the urllib3 code will attempt to load the
- default verify paths if it hasn't been told about any paths, even if
- the context itself was configured sometime earlier. We resolve that by
- just ignoring it.
- pass
-
- def load_default_certs(self):
- return self.set_default_verify_paths()
-
- def set_ciphers(self, ciphers):
- # For now, we just require the default cipher string.
- if ciphers != util.ssl_.DEFAULT_CIPHERS:
- raise ValueError(
- "SecureTransport doesn't support custom cipher strings"
- )
-
- def load_verify_locations(self, cafile=None, capath=None, cadata=None):
- # OK, we only really support cadata and cafile.
- if capath is not None:
- raise ValueError(
- "SecureTransport does not support cert directories"
- )
-
- self._trust_bundle = cafile or cadata
-
- def load_cert_chain(self, certfile, keyfile=None, password=None):
- self._client_cert = certfile
- self._client_key = keyfile
- self._client_key_passphrase = password
-
- def wrap_socket(self, sock, server_side=False,
- do_handshake_on_connect=True, suppress_ragged_eofs=True,
- server_hostname=None):
- # So, what do we do here? Firstly, we assert some properties. This is a
- # stripped down shim, so there is some functionality we don't support.
- # See PEP 543 for the real deal.
- assert not server_side
- assert do_handshake_on_connect
- assert suppress_ragged_eofs
-
- # Ok, we're good to go. Now we want to create the wrapped socket object
- # and store it in the appropriate place.
- wrapped_socket = WrappedSocket(sock)
-
- # Now we can handshake
- wrapped_socket.handshake(
- server_hostname, self._verify, self._trust_bundle,
- self._min_version, self._max_version, self._client_cert,
- self._client_key, self._client_key_passphrase
- )
- return wrapped_socket
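Pieced together, the class above is meant as a drop-in for the stdlib SSLContext and is normally installed behind urllib3 via inject_into_urllib3() from the module docstring. Driving it by hand would look roughly like the sketch below; it is an assumption that the surrounding vendored package is importable, it only works on macOS, and the CA bundle path is invented::

    import socket
    import ssl

    ctx = SecureTransportContext(ssl.PROTOCOL_SSLv23)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_verify_locations(cafile='/usr/local/etc/ca-bundle.crt')   # assumed path

    sock = socket.create_connection(('example.org', 443))
    tls = ctx.wrap_socket(sock, server_hostname='example.org')
    tls.sendall(b'HEAD / HTTP/1.0\r\nHost: example.org\r\n\r\n')
    print(tls.recv(64))
    tls.close()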
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py b/src/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
deleted file mode 100644
index 1cb79285b..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/contrib/socks.py
+++ /dev/null
@@ -1,189 +0,0 @@
-# -*- coding: utf-8 -*-
-# SPDX-License-Identifier: MIT
-"""
-This module contains provisional support for SOCKS proxies from within
-urllib3. This module supports SOCKS4 (specifically the SOCKS4A variant) and
-SOCKS5. To enable its functionality, either install PySocks or install this
-module with the ``socks`` extra.
-
-The SOCKS implementation supports the full range of urllib3 features. It also
-supports the following SOCKS features:
-
-- SOCKS4
-- SOCKS4a
-- SOCKS5
-- Usernames and passwords for the SOCKS proxy
-
-Known Limitations:
-
-- Currently PySocks does not support contacting remote websites via literal
- IPv6 addresses. Any such connection attempt will fail. You must use a domain
- name.
-- Currently PySocks does not support IPv6 connections to the SOCKS proxy. Any
- such connection attempt will fail.
-"""
-from __future__ import absolute_import
-
-try:
- import socks
-except ImportError:
- import warnings
- from ..exceptions import DependencyWarning
-
- warnings.warn((
- 'SOCKS support in urllib3 requires the installation of optional '
- 'dependencies: specifically, PySocks. For more information, see '
- 'https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies'
- ),
- DependencyWarning
- )
- raise
-
-from socket import error as SocketError, timeout as SocketTimeout
-
-from ..connection import (
- HTTPConnection, HTTPSConnection
-)
-from ..connectionpool import (
- HTTPConnectionPool, HTTPSConnectionPool
-)
-from ..exceptions import ConnectTimeoutError, NewConnectionError
-from ..poolmanager import PoolManager
-from ..util.url import parse_url
-
-try:
- import ssl
-except ImportError:
- ssl = None
-
-
-class SOCKSConnection(HTTPConnection):
- """
- A plain-text HTTP connection that connects via a SOCKS proxy.
- """
- def __init__(self, *args, **kwargs):
- self._socks_options = kwargs.pop('_socks_options')
- super(SOCKSConnection, self).__init__(*args, **kwargs)
-
- def _new_conn(self):
- """
- Establish a new connection via the SOCKS proxy.
- """
- extra_kw = {}
- if self.source_address:
- extra_kw['source_address'] = self.source_address
-
- if self.socket_options:
- extra_kw['socket_options'] = self.socket_options
-
- try:
- conn = socks.create_connection(
- (self.host, self.port),
- proxy_type=self._socks_options['socks_version'],
- proxy_addr=self._socks_options['proxy_host'],
- proxy_port=self._socks_options['proxy_port'],
- proxy_username=self._socks_options['username'],
- proxy_password=self._socks_options['password'],
- proxy_rdns=self._socks_options['rdns'],
- timeout=self.timeout,
- **extra_kw
- )
-
- except SocketTimeout as e:
- raise ConnectTimeoutError(
- self, "Connection to %s timed out. (connect timeout=%s)" %
- (self.host, self.timeout))
-
- except socks.ProxyError as e:
- # This is fragile as hell, but it seems to be the only way to raise
- # useful errors here.
- if e.socket_err:
- error = e.socket_err
- if isinstance(error, SocketTimeout):
- raise ConnectTimeoutError(
- self,
- "Connection to %s timed out. (connect timeout=%s)" %
- (self.host, self.timeout)
- )
- else:
- raise NewConnectionError(
- self,
- "Failed to establish a new connection: %s" % error
- )
- else:
- raise NewConnectionError(
- self,
- "Failed to establish a new connection: %s" % e
- )
-
- except SocketError as e: # Defensive: PySocks should catch all these.
- raise NewConnectionError(
- self, "Failed to establish a new connection: %s" % e)
-
- return conn
-
-
-# We don't need to duplicate the Verified/Unverified distinction from
-# urllib3/connection.py here because the HTTPSConnection will already have been
-# correctly set to either the Verified or Unverified form by that module. This
-# means the SOCKSHTTPSConnection will automatically be the correct type.
-class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
- pass
-
-
-class SOCKSHTTPConnectionPool(HTTPConnectionPool):
- ConnectionCls = SOCKSConnection
-
-
-class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
- ConnectionCls = SOCKSHTTPSConnection
-
-
-class SOCKSProxyManager(PoolManager):
- """
- A version of the urllib3 ProxyManager that routes connections via the
- defined SOCKS proxy.
- """
- pool_classes_by_scheme = {
- 'http': SOCKSHTTPConnectionPool,
- 'https': SOCKSHTTPSConnectionPool,
- }
-
- def __init__(self, proxy_url, username=None, password=None,
- num_pools=10, headers=None, **connection_pool_kw):
- parsed = parse_url(proxy_url)
-
- if parsed.scheme == 'socks5':
- socks_version = socks.PROXY_TYPE_SOCKS5
- rdns = False
- elif parsed.scheme == 'socks5h':
- socks_version = socks.PROXY_TYPE_SOCKS5
- rdns = True
- elif parsed.scheme == 'socks4':
- socks_version = socks.PROXY_TYPE_SOCKS4
- rdns = False
- elif parsed.scheme == 'socks4a':
- socks_version = socks.PROXY_TYPE_SOCKS4
- rdns = True
- else:
- raise ValueError(
- "Unable to determine SOCKS version from %s" % proxy_url
- )
-
- self.proxy_url = proxy_url
-
- socks_options = {
- 'socks_version': socks_version,
- 'proxy_host': parsed.host,
- 'proxy_port': parsed.port,
- 'username': username,
- 'password': password,
- 'rdns': rdns
- }
- connection_pool_kw['_socks_options'] = socks_options
-
- super(SOCKSProxyManager, self).__init__(
- num_pools, headers, **connection_pool_kw
- )
-
- self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/exceptions.py b/src/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
deleted file mode 100644
index a71cabe06..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/exceptions.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from .packages.six.moves.http_client import (
- IncompleteRead as httplib_IncompleteRead
-)
-# Base Exceptions
-
-
-class HTTPError(Exception):
- "Base exception used by this module."
- pass
-
-
-class HTTPWarning(Warning):
- "Base warning used by this module."
- pass
-
-
-class PoolError(HTTPError):
- "Base exception for errors caused within a pool."
- def __init__(self, pool, message):
- self.pool = pool
- HTTPError.__init__(self, "%s: %s" % (pool, message))
-
- def __reduce__(self):
- # For pickling purposes.
- return self.__class__, (None, None)
-
-
-class RequestError(PoolError):
- "Base exception for PoolErrors that have associated URLs."
- def __init__(self, pool, url, message):
- self.url = url
- PoolError.__init__(self, pool, message)
-
- def __reduce__(self):
- # For pickling purposes.
- return self.__class__, (None, self.url, None)
-
-
-class SSLError(HTTPError):
- "Raised when SSL certificate fails in an HTTPS connection."
- pass
-
-
-class ProxyError(HTTPError):
- "Raised when the connection to a proxy fails."
- pass
-
-
-class DecodeError(HTTPError):
- "Raised when automatic decoding based on Content-Type fails."
- pass
-
-
-class ProtocolError(HTTPError):
- "Raised when something unexpected happens mid-request/response."
- pass
-
-
-#: Renamed to ProtocolError but aliased for backwards compatibility.
-ConnectionError = ProtocolError
-
-
-# Leaf Exceptions
-
-class MaxRetryError(RequestError):
- """Raised when the maximum number of retries is exceeded.
-
- :param pool: The connection pool
- :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
- :param string url: The requested URL
- :param exceptions.Exception reason: The underlying error
-
- """
-
- def __init__(self, pool, url, reason=None):
- self.reason = reason
-
- message = "Max retries exceeded with url: %s (Caused by %r)" % (
- url, reason)
-
- RequestError.__init__(self, pool, url, message)
-
-
-class HostChangedError(RequestError):
- "Raised when an existing pool gets a request for a foreign host."
-
- def __init__(self, pool, url, retries=3):
- message = "Tried to open a foreign host with url: %s" % url
- RequestError.__init__(self, pool, url, message)
- self.retries = retries
-
-
-class TimeoutStateError(HTTPError):
- """ Raised when passing an invalid state to a timeout """
- pass
-
-
-class TimeoutError(HTTPError):
- """ Raised when a socket timeout error occurs.
-
- Catching this error will catch both :exc:`ReadTimeoutErrors
- <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
- """
- pass
-
-
-class ReadTimeoutError(TimeoutError, RequestError):
- "Raised when a socket timeout occurs while receiving data from a server"
- pass
-
-
-# This timeout error does not have a URL attached and needs to inherit from the
-# base HTTPError
-class ConnectTimeoutError(TimeoutError):
- "Raised when a socket timeout occurs while connecting to a server"
- pass
-
-
-class NewConnectionError(ConnectTimeoutError, PoolError):
- "Raised when we fail to establish a new connection. Usually ECONNREFUSED."
- pass
-
-
-class EmptyPoolError(PoolError):
- "Raised when a pool runs out of connections and no more are allowed."
- pass
-
-
-class ClosedPoolError(PoolError):
- "Raised when a request enters a pool after the pool has been closed."
- pass
-
-
-class LocationValueError(ValueError, HTTPError):
- "Raised when there is something wrong with a given URL input."
- pass
-
-
-class LocationParseError(LocationValueError):
- "Raised when get_host or similar fails to parse the URL input."
-
- def __init__(self, location):
- message = "Failed to parse: %s" % location
- HTTPError.__init__(self, message)
-
- self.location = location
-
-
-class ResponseError(HTTPError):
- "Used as a container for an error reason supplied in a MaxRetryError."
- GENERIC_ERROR = 'too many error responses'
- SPECIFIC_ERROR = 'too many {status_code} error responses'
-
-
-class SecurityWarning(HTTPWarning):
- "Warned when perfoming security reducing actions"
- pass
-
-
-class SubjectAltNameWarning(SecurityWarning):
- "Warned when connecting to a host with a certificate missing a SAN."
- pass
-
-
-class InsecureRequestWarning(SecurityWarning):
- "Warned when making an unverified HTTPS request."
- pass
-
-
-class SystemTimeWarning(SecurityWarning):
- "Warned when system time is suspected to be wrong"
- pass
-
-
-class InsecurePlatformWarning(SecurityWarning):
- "Warned when certain SSL configuration is not available on a platform."
- pass
-
-
-class SNIMissingWarning(HTTPWarning):
- "Warned when making a HTTPS request without SNI available."
- pass
-
-
-class DependencyWarning(HTTPWarning):
- """
- Warned when an attempt is made to import a module with missing optional
- dependencies.
- """
- pass
-
-
-class ResponseNotChunked(ProtocolError, ValueError):
- "Response needs to be chunked in order to read it as chunks."
- pass
-
-
-class BodyNotHttplibCompatible(HTTPError):
- """
- Body should be httplib.HTTPResponse like (have an fp attribute which
- returns raw chunks) for read_chunked().
- """
- pass
-
-
-class IncompleteRead(HTTPError, httplib_IncompleteRead):
- """
- Response length doesn't match expected Content-Length
-
- Subclass of http_client.IncompleteRead to allow int value
- for `partial` to avoid creating large objects on streamed
- reads.
- """
- def __init__(self, partial, expected):
- super(IncompleteRead, self).__init__(partial, expected)
-
- def __repr__(self):
- return ('IncompleteRead(%i bytes read, '
- '%i more expected)' % (self.partial, self.expected))
-
-
-class InvalidHeader(HTTPError):
- "The header provided was somehow invalid."
- pass
-
-
-class ProxySchemeUnknown(AssertionError, ValueError):
- "ProxyManager does not support the supplied scheme"
- # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
-
- def __init__(self, scheme):
- message = "Not supported proxy scheme %s" % scheme
- super(ProxySchemeUnknown, self).__init__(message)
-
-
-class HeaderParsingError(HTTPError):
- "Raised by assert_header_parsing, but we convert it to a log.warning statement."
- def __init__(self, defects, unparsed_data):
- message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
- super(HeaderParsingError, self).__init__(message)
-
-
-class UnrewindableBodyError(HTTPError):
- "urllib3 encountered an error when trying to rewind a body"
- pass
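Because ReadTimeoutError and ConnectTimeoutError both derive from TimeoutError, one except clause covers either failure mode. A sketch against the upstream package layout; the unroutable address is only there to force a connect timeout, and depending on the retry configuration the timeout may instead surface wrapped inside MaxRetryError::

    import urllib3
    from urllib3.exceptions import MaxRetryError, TimeoutError as Urllib3Timeout

    http = urllib3.PoolManager()
    try:
        http.request('GET', 'http://10.255.255.1/', timeout=0.5, retries=False)
    except Urllib3Timeout:
        print('connect or read timeout')
    except MaxRetryError as exc:
        print('gave up after retries:', exc.reason)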
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/fields.py b/src/collectors/python.d.plugin/python_modules/urllib3/fields.py
deleted file mode 100644
index de7577b74..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/fields.py
+++ /dev/null
@@ -1,179 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import email.utils
-import mimetypes
-
-from .packages import six
-
-
-def guess_content_type(filename, default='application/octet-stream'):
- """
- Guess the "Content-Type" of a file.
-
- :param filename:
- The filename to guess the "Content-Type" of using :mod:`mimetypes`.
- :param default:
- If no "Content-Type" can be guessed, default to `default`.
- """
- if filename:
- return mimetypes.guess_type(filename)[0] or default
- return default
-
-
-def format_header_param(name, value):
- """
- Helper function to format and quote a single header parameter.
-
- Particularly useful for header parameters which might contain
- non-ASCII values, like file names. This follows RFC 2231, as
- suggested by RFC 2388 Section 4.4.
-
- :param name:
- The name of the parameter, a string expected to be ASCII only.
- :param value:
- The value of the parameter, provided as a unicode string.
- """
- if not any(ch in value for ch in '"\\\r\n'):
- result = '%s="%s"' % (name, value)
- try:
- result.encode('ascii')
- except (UnicodeEncodeError, UnicodeDecodeError):
- pass
- else:
- return result
- if not six.PY3 and isinstance(value, six.text_type): # Python 2:
- value = value.encode('utf-8')
- value = email.utils.encode_rfc2231(value, 'utf-8')
- value = '%s*=%s' % (name, value)
- return value
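The function above emits a plain quoted parameter for ASCII values and falls back to RFC 2231 encoding otherwise; the fallback can be reproduced with the stdlib alone::

    import email.utils

    # ASCII value: a plain quoted parameter is enough.
    print('%s="%s"' % ('filename', 'report.txt'))           # filename="report.txt"

    # Non-ASCII value: the RFC 2231 extended form produced by the fallback above.
    encoded = email.utils.encode_rfc2231(u'r\u00e9sum\u00e9.txt', 'utf-8')
    print('%s*=%s' % ('filename', encoded))                  # filename*=utf-8''r%C3%A9sum%C3%A9.txt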
-
-
-class RequestField(object):
- """
- A data container for request body parameters.
-
- :param name:
- The name of this request field.
- :param data:
- The data/value body.
- :param filename:
- An optional filename of the request field.
- :param headers:
- An optional dict-like object of headers to initially use for the field.
- """
- def __init__(self, name, data, filename=None, headers=None):
- self._name = name
- self._filename = filename
- self.data = data
- self.headers = {}
- if headers:
- self.headers = dict(headers)
-
- @classmethod
- def from_tuples(cls, fieldname, value):
- """
- A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
-
- Supports constructing :class:`~urllib3.fields.RequestField` from
- parameter of key/value strings AND key/filetuple. A filetuple is a
- (filename, data, MIME type) tuple where the MIME type is optional.
- For example::
-
- 'foo': 'bar',
- 'fakefile': ('foofile.txt', 'contents of foofile'),
- 'realfile': ('barfile.txt', open('realfile').read()),
- 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
- 'nonamefile': 'contents of nonamefile field',
-
- Field names and filenames must be unicode.
- """
- if isinstance(value, tuple):
- if len(value) == 3:
- filename, data, content_type = value
- else:
- filename, data = value
- content_type = guess_content_type(filename)
- else:
- filename = None
- content_type = None
- data = value
-
- request_param = cls(fieldname, data, filename=filename)
- request_param.make_multipart(content_type=content_type)
-
- return request_param
-
- def _render_part(self, name, value):
- """
- Overridable helper function to format a single header parameter.
-
- :param name:
- The name of the parameter, a string expected to be ASCII only.
- :param value:
- The value of the parameter, provided as a unicode string.
- """
- return format_header_param(name, value)
-
- def _render_parts(self, header_parts):
- """
- Helper function to format and quote a single header.
-
- Useful for single headers that are composed of multiple items. E.g.,
- 'Content-Disposition' fields.
-
- :param header_parts:
- A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
- as `k1="v1"; k2="v2"; ...`.
- """
- parts = []
- iterable = header_parts
- if isinstance(header_parts, dict):
- iterable = header_parts.items()
-
- for name, value in iterable:
- if value is not None:
- parts.append(self._render_part(name, value))
-
- return '; '.join(parts)
-
- def render_headers(self):
- """
- Renders the headers for this request field.
- """
- lines = []
-
- sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
- for sort_key in sort_keys:
- if self.headers.get(sort_key, False):
- lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
-
- for header_name, header_value in self.headers.items():
- if header_name not in sort_keys:
- if header_value:
- lines.append('%s: %s' % (header_name, header_value))
-
- lines.append('\r\n')
- return '\r\n'.join(lines)
-
- def make_multipart(self, content_disposition=None, content_type=None,
- content_location=None):
- """
- Makes this request field into a multipart request field.
-
- This method sets the "Content-Disposition", "Content-Type" and
- "Content-Location" headers on the request field.
-
- :param content_type:
- The 'Content-Type' of the request body.
- :param content_location:
- The 'Content-Location' of the request body.
-
- """
- self.headers['Content-Disposition'] = content_disposition or 'form-data'
- self.headers['Content-Disposition'] += '; '.join([
- '', self._render_parts(
- (('name', self._name), ('filename', self._filename))
- )
- ])
- self.headers['Content-Type'] = content_type
- self.headers['Content-Location'] = content_location
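Put together, a field built with from_tuples() renders exactly the per-part headers that encode_multipart_formdata() in filepost.py (deleted below) writes ahead of each body. A sketch against the upstream urllib3 1.x API::

    from urllib3.fields import RequestField

    field = RequestField.from_tuples(
        'attachment', ('notes.txt', u'hello world', 'text/plain'))
    print(field.render_headers())
    # Content-Disposition: form-data; name="attachment"; filename="notes.txt"
    # Content-Type: text/plain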
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/filepost.py b/src/collectors/python.d.plugin/python_modules/urllib3/filepost.py
deleted file mode 100644
index 3febc9cfe..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/filepost.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import codecs
-
-from uuid import uuid4
-from io import BytesIO
-
-from .packages import six
-from .packages.six import b
-from .fields import RequestField
-
-writer = codecs.lookup('utf-8')[3]
-
-
-def choose_boundary():
- """
- Our embarrassingly-simple replacement for mimetools.choose_boundary.
- """
- return uuid4().hex
-
-
-def iter_field_objects(fields):
- """
- Iterate over fields.
-
- Supports list of (k, v) tuples and dicts, and lists of
- :class:`~urllib3.fields.RequestField`.
-
- """
- if isinstance(fields, dict):
- i = six.iteritems(fields)
- else:
- i = iter(fields)
-
- for field in i:
- if isinstance(field, RequestField):
- yield field
- else:
- yield RequestField.from_tuples(*field)
-
-
-def iter_fields(fields):
- """
- .. deprecated:: 1.6
-
- Iterate over fields.
-
- The addition of :class:`~urllib3.fields.RequestField` makes this function
- obsolete. Instead, use :func:`iter_field_objects`, which returns
- :class:`~urllib3.fields.RequestField` objects.
-
- Supports list of (k, v) tuples and dicts.
- """
- if isinstance(fields, dict):
- return ((k, v) for k, v in six.iteritems(fields))
-
- return ((k, v) for k, v in fields)
-
-
-def encode_multipart_formdata(fields, boundary=None):
- """
- Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
-
- :param fields:
- Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
-
- :param boundary:
- If not specified, a random boundary is generated by the module's own
- :func:`choose_boundary` helper above (a uuid4 hex string).
- """
- body = BytesIO()
- if boundary is None:
- boundary = choose_boundary()
-
- for field in iter_field_objects(fields):
- body.write(b('--%s\r\n' % (boundary)))
-
- writer(body).write(field.render_headers())
- data = field.data
-
- if isinstance(data, int):
- data = str(data) # Backwards compatibility
-
- if isinstance(data, six.text_type):
- writer(body).write(data)
- else:
- body.write(data)
-
- body.write(b'\r\n')
-
- body.write(b('--%s--\r\n' % (boundary)))
-
- content_type = str('multipart/form-data; boundary=%s' % boundary)
-
- return body.getvalue(), content_type
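Typical use of the encoder, again against the upstream package; the boundary is random unless supplied, and is fixed here only to make the output stable::

    from urllib3.filepost import encode_multipart_formdata

    body, content_type = encode_multipart_formdata(
        {'greeting': 'hello',
         'logo': ('logo.txt', b'some bytes', 'text/plain')},
        boundary='xxBOUNDARYxx',
    )
    print(content_type)                  # multipart/form-data; boundary=xxBOUNDARYxx
    print(body.decode('utf-8'))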
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
deleted file mode 100644
index 170e974c1..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from __future__ import absolute_import
-
-from . import ssl_match_hostname
-
-__all__ = ('ssl_match_hostname', )
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/__init__.py
+++ /dev/null
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
deleted file mode 100644
index 8ab122f8b..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/backports/makefile.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# -*- coding: utf-8 -*-
-# SPDX-License-Identifier: MIT
-"""
-backports.makefile
-~~~~~~~~~~~~~~~~~~
-
-Backports the Python 3 ``socket.makefile`` method for use with anything that
-wants to create a "fake" socket object.
-"""
-import io
-
-from socket import SocketIO
-
-
-def backport_makefile(self, mode="r", buffering=None, encoding=None,
- errors=None, newline=None):
- """
- Backport of ``socket.makefile`` from Python 3.5.
- """
- if not set(mode) <= set(["r", "w", "b"]):
- raise ValueError(
- "invalid mode %r (only r, w, b allowed)" % (mode,)
- )
- writing = "w" in mode
- reading = "r" in mode or not writing
- assert reading or writing
- binary = "b" in mode
- rawmode = ""
- if reading:
- rawmode += "r"
- if writing:
- rawmode += "w"
- raw = SocketIO(self, rawmode)
- self._makefile_refs += 1
- if buffering is None:
- buffering = -1
- if buffering < 0:
- buffering = io.DEFAULT_BUFFER_SIZE
- if buffering == 0:
- if not binary:
- raise ValueError("unbuffered streams must be binary")
- return raw
- if reading and writing:
- buffer = io.BufferedRWPair(raw, raw, buffering)
- elif reading:
- buffer = io.BufferedReader(raw, buffering)
- else:
- assert writing
- buffer = io.BufferedWriter(raw, buffering)
- if binary:
- return buffer
- text = io.TextIOWrapper(buffer, encoding, errors, newline)
- text.mode = mode
- return text
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
deleted file mode 100644
index 9f7c0e6b8..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ordered_dict.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
-# Passes Python2.7's test suite and incorporates all the latest updates.
-# Copyright 2009 Raymond Hettinger, released under the MIT License.
-# http://code.activestate.com/recipes/576693/
-# SPDX-License-Identifier: MIT
-try:
- from thread import get_ident as _get_ident
-except ImportError:
- from dummy_thread import get_ident as _get_ident
-
-try:
- from _abcoll import KeysView, ValuesView, ItemsView
-except ImportError:
- pass
-
-
-class OrderedDict(dict):
- 'Dictionary that remembers insertion order'
- # An inherited dict maps keys to values.
- # The inherited dict provides __getitem__, __len__, __contains__, and get.
- # The remaining methods are order-aware.
- # Big-O running times for all methods are the same as for regular dictionaries.
-
- # The internal self.__map dictionary maps keys to links in a doubly linked list.
- # The circular doubly linked list starts and ends with a sentinel element.
- # The sentinel element never gets deleted (this simplifies the algorithm).
- # Each link is stored as a list of length three: [PREV, NEXT, KEY].
-
- def __init__(self, *args, **kwds):
- '''Initialize an ordered dictionary. Signature is the same as for
- regular dictionaries, but keyword arguments are not recommended
- because their insertion order is arbitrary.
-
- '''
- if len(args) > 1:
- raise TypeError('expected at most 1 arguments, got %d' % len(args))
- try:
- self.__root
- except AttributeError:
- self.__root = root = [] # sentinel node
- root[:] = [root, root, None]
- self.__map = {}
- self.__update(*args, **kwds)
-
- def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
- 'od.__setitem__(i, y) <==> od[i]=y'
- # Setting a new item creates a new link which goes at the end of the linked
- # list, and the inherited dictionary is updated with the new key/value pair.
- if key not in self:
- root = self.__root
- last = root[0]
- last[1] = root[0] = self.__map[key] = [last, root, key]
- dict_setitem(self, key, value)
-
- def __delitem__(self, key, dict_delitem=dict.__delitem__):
- 'od.__delitem__(y) <==> del od[y]'
- # Deleting an existing item uses self.__map to find the link which is
- # then removed by updating the links in the predecessor and successor nodes.
- dict_delitem(self, key)
- link_prev, link_next, key = self.__map.pop(key)
- link_prev[1] = link_next
- link_next[0] = link_prev
-
- def __iter__(self):
- 'od.__iter__() <==> iter(od)'
- root = self.__root
- curr = root[1]
- while curr is not root:
- yield curr[2]
- curr = curr[1]
-
- def __reversed__(self):
- 'od.__reversed__() <==> reversed(od)'
- root = self.__root
- curr = root[0]
- while curr is not root:
- yield curr[2]
- curr = curr[0]
-
- def clear(self):
- 'od.clear() -> None. Remove all items from od.'
- try:
- for node in self.__map.itervalues():
- del node[:]
- root = self.__root
- root[:] = [root, root, None]
- self.__map.clear()
- except AttributeError:
- pass
- dict.clear(self)
-
- def popitem(self, last=True):
- '''od.popitem() -> (k, v), return and remove a (key, value) pair.
- Pairs are returned in LIFO order if last is true or FIFO order if false.
-
- '''
- if not self:
- raise KeyError('dictionary is empty')
- root = self.__root
- if last:
- link = root[0]
- link_prev = link[0]
- link_prev[1] = root
- root[0] = link_prev
- else:
- link = root[1]
- link_next = link[1]
- root[1] = link_next
- link_next[0] = root
- key = link[2]
- del self.__map[key]
- value = dict.pop(self, key)
- return key, value
-
- # -- the following methods do not depend on the internal structure --
-
- def keys(self):
- 'od.keys() -> list of keys in od'
- return list(self)
-
- def values(self):
- 'od.values() -> list of values in od'
- return [self[key] for key in self]
-
- def items(self):
- 'od.items() -> list of (key, value) pairs in od'
- return [(key, self[key]) for key in self]
-
- def iterkeys(self):
- 'od.iterkeys() -> an iterator over the keys in od'
- return iter(self)
-
- def itervalues(self):
- 'od.itervalues -> an iterator over the values in od'
- for k in self:
- yield self[k]
-
- def iteritems(self):
- 'od.iteritems -> an iterator over the (key, value) items in od'
- for k in self:
- yield (k, self[k])
-
- def update(*args, **kwds):
- '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
-
- If E is a dict instance, does: for k in E: od[k] = E[k]
- If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
- Or if E is an iterable of items, does: for k, v in E: od[k] = v
- In either case, this is followed by: for k, v in F.items(): od[k] = v
-
- '''
- if len(args) > 2:
- raise TypeError('update() takes at most 2 positional '
- 'arguments (%d given)' % (len(args),))
- elif not args:
- raise TypeError('update() takes at least 1 argument (0 given)')
- self = args[0]
- # Make progressively weaker assumptions about "other"
- other = ()
- if len(args) == 2:
- other = args[1]
- if isinstance(other, dict):
- for key in other:
- self[key] = other[key]
- elif hasattr(other, 'keys'):
- for key in other.keys():
- self[key] = other[key]
- else:
- for key, value in other:
- self[key] = value
- for key, value in kwds.items():
- self[key] = value
-
- __update = update # let subclasses override update without breaking __init__
-
- __marker = object()
-
- def pop(self, key, default=__marker):
- '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
- If key is not found, d is returned if given, otherwise KeyError is raised.
-
- '''
- if key in self:
- result = self[key]
- del self[key]
- return result
- if default is self.__marker:
- raise KeyError(key)
- return default
-
- def setdefault(self, key, default=None):
- 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
- if key in self:
- return self[key]
- self[key] = default
- return default
-
- def __repr__(self, _repr_running={}):
- 'od.__repr__() <==> repr(od)'
- call_key = id(self), _get_ident()
- if call_key in _repr_running:
- return '...'
- _repr_running[call_key] = 1
- try:
- if not self:
- return '%s()' % (self.__class__.__name__,)
- return '%s(%r)' % (self.__class__.__name__, self.items())
- finally:
- del _repr_running[call_key]
-
- def __reduce__(self):
- 'Return state information for pickling'
- items = [[k, self[k]] for k in self]
- inst_dict = vars(self).copy()
- for k in vars(OrderedDict()):
- inst_dict.pop(k, None)
- if inst_dict:
- return (self.__class__, (items,), inst_dict)
- return self.__class__, (items,)
-
- def copy(self):
- 'od.copy() -> a shallow copy of od'
- return self.__class__(self)
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
- and values equal to v (which defaults to None).
-
- '''
- d = cls()
- for key in iterable:
- d[key] = value
- return d
-
- def __eq__(self, other):
- '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
- while comparison to a regular mapping is order-insensitive.
-
- '''
- if isinstance(other, OrderedDict):
- return len(self)==len(other) and self.items() == other.items()
- return dict.__eq__(self, other)
-
- def __ne__(self, other):
- return not self == other
-
- # -- the following methods are only used in Python 2.7 --
-
- def viewkeys(self):
- "od.viewkeys() -> a set-like object providing a view on od's keys"
- return KeysView(self)
-
- def viewvalues(self):
- "od.viewvalues() -> an object providing a view on od's values"
- return ValuesView(self)
-
- def viewitems(self):
- "od.viewitems() -> a set-like object providing a view on od's items"
- return ItemsView(self)
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
deleted file mode 100644
index 31df5012b..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/six.py
+++ /dev/null
@@ -1,852 +0,0 @@
-"""Utilities for writing code that runs on Python 2 and 3"""
-
-# Copyright (c) 2010-2015 Benjamin Peterson
-#
-# SPDX-License-Identifier: MIT
-
-from __future__ import absolute_import
-
-import functools
-import itertools
-import operator
-import sys
-import types
-
-__author__ = "Benjamin Peterson <benjamin@python.org>"
-__version__ = "1.10.0"
-
-
-# Useful for very coarse version differentiation.
-PY2 = sys.version_info[0] == 2
-PY3 = sys.version_info[0] == 3
-PY34 = sys.version_info[0:2] >= (3, 4)
-
-if PY3:
- string_types = str,
- integer_types = int,
- class_types = type,
- text_type = str
- binary_type = bytes
-
- MAXSIZE = sys.maxsize
-else:
- string_types = basestring,
- integer_types = (int, long)
- class_types = (type, types.ClassType)
- text_type = unicode
- binary_type = str
-
- if sys.platform.startswith("java"):
- # Jython always uses 32 bits.
- MAXSIZE = int((1 << 31) - 1)
- else:
- # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
- class X(object):
-
- def __len__(self):
- return 1 << 31
- try:
- len(X())
- except OverflowError:
- # 32-bit
- MAXSIZE = int((1 << 31) - 1)
- else:
- # 64-bit
- MAXSIZE = int((1 << 63) - 1)
- del X
-
-
-def _add_doc(func, doc):
- """Add documentation to a function."""
- func.__doc__ = doc
-
-
-def _import_module(name):
- """Import module, returning the module after the last dot."""
- __import__(name)
- return sys.modules[name]
-
-
-class _LazyDescr(object):
-
- def __init__(self, name):
- self.name = name
-
- def __get__(self, obj, tp):
- result = self._resolve()
- setattr(obj, self.name, result) # Invokes __set__.
- try:
- # This is a bit ugly, but it avoids running this again by
- # removing this descriptor.
- delattr(obj.__class__, self.name)
- except AttributeError:
- pass
- return result
-
-
-class MovedModule(_LazyDescr):
-
- def __init__(self, name, old, new=None):
- super(MovedModule, self).__init__(name)
- if PY3:
- if new is None:
- new = name
- self.mod = new
- else:
- self.mod = old
-
- def _resolve(self):
- return _import_module(self.mod)
-
- def __getattr__(self, attr):
- _module = self._resolve()
- value = getattr(_module, attr)
- setattr(self, attr, value)
- return value
-
-
-class _LazyModule(types.ModuleType):
-
- def __init__(self, name):
- super(_LazyModule, self).__init__(name)
- self.__doc__ = self.__class__.__doc__
-
- def __dir__(self):
- attrs = ["__doc__", "__name__"]
- attrs += [attr.name for attr in self._moved_attributes]
- return attrs
-
- # Subclasses should override this
- _moved_attributes = []
-
-
-class MovedAttribute(_LazyDescr):
-
- def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
- super(MovedAttribute, self).__init__(name)
- if PY3:
- if new_mod is None:
- new_mod = name
- self.mod = new_mod
- if new_attr is None:
- if old_attr is None:
- new_attr = name
- else:
- new_attr = old_attr
- self.attr = new_attr
- else:
- self.mod = old_mod
- if old_attr is None:
- old_attr = name
- self.attr = old_attr
-
- def _resolve(self):
- module = _import_module(self.mod)
- return getattr(module, self.attr)
-
-
-class _SixMetaPathImporter(object):
-
- """
- A meta path importer to import six.moves and its submodules.
-
- This class implements a PEP302 finder and loader. It should be compatible
- with Python 2.5 and all existing versions of Python3
- """
-
- def __init__(self, six_module_name):
- self.name = six_module_name
- self.known_modules = {}
-
- def _add_module(self, mod, *fullnames):
- for fullname in fullnames:
- self.known_modules[self.name + "." + fullname] = mod
-
- def _get_module(self, fullname):
- return self.known_modules[self.name + "." + fullname]
-
- def find_module(self, fullname, path=None):
- if fullname in self.known_modules:
- return self
- return None
-
- def __get_module(self, fullname):
- try:
- return self.known_modules[fullname]
- except KeyError:
- raise ImportError("This loader does not know module " + fullname)
-
- def load_module(self, fullname):
- try:
- # in case of a reload
- return sys.modules[fullname]
- except KeyError:
- pass
- mod = self.__get_module(fullname)
- if isinstance(mod, MovedModule):
- mod = mod._resolve()
- else:
- mod.__loader__ = self
- sys.modules[fullname] = mod
- return mod
-
- def is_package(self, fullname):
- """
- Return true, if the named module is a package.
-
- We need this method to get correct spec objects with
- Python 3.4 (see PEP451)
- """
- return hasattr(self.__get_module(fullname), "__path__")
-
- def get_code(self, fullname):
- """Return None
-
- Required, if is_package is implemented"""
- self.__get_module(fullname) # eventually raises ImportError
- return None
- get_source = get_code # same as get_code
-
-_importer = _SixMetaPathImporter(__name__)
-
-
-class _MovedItems(_LazyModule):
-
- """Lazy loading of moved objects"""
- __path__ = [] # mark as package
-
-
-_moved_attributes = [
- MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
- MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
- MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
- MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
- MovedAttribute("intern", "__builtin__", "sys"),
- MovedAttribute("map", "itertools", "builtins", "imap", "map"),
- MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
- MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
- MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
- MovedAttribute("reduce", "__builtin__", "functools"),
- MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
- MovedAttribute("StringIO", "StringIO", "io"),
- MovedAttribute("UserDict", "UserDict", "collections"),
- MovedAttribute("UserList", "UserList", "collections"),
- MovedAttribute("UserString", "UserString", "collections"),
- MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
- MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
- MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
- MovedModule("builtins", "__builtin__"),
- MovedModule("configparser", "ConfigParser"),
- MovedModule("copyreg", "copy_reg"),
- MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
- MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
- MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
- MovedModule("http_cookies", "Cookie", "http.cookies"),
- MovedModule("html_entities", "htmlentitydefs", "html.entities"),
- MovedModule("html_parser", "HTMLParser", "html.parser"),
- MovedModule("http_client", "httplib", "http.client"),
- MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
- MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
- MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
- MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
- MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
- MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
- MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
- MovedModule("cPickle", "cPickle", "pickle"),
- MovedModule("queue", "Queue"),
- MovedModule("reprlib", "repr"),
- MovedModule("socketserver", "SocketServer"),
- MovedModule("_thread", "thread", "_thread"),
- MovedModule("tkinter", "Tkinter"),
- MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
- MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
- MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
- MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
- MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
- MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
- MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
- MovedModule("tkinter_colorchooser", "tkColorChooser",
- "tkinter.colorchooser"),
- MovedModule("tkinter_commondialog", "tkCommonDialog",
- "tkinter.commondialog"),
- MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
- MovedModule("tkinter_font", "tkFont", "tkinter.font"),
- MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
- MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
- "tkinter.simpledialog"),
- MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
- MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
- MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
- MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
- MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
- MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
-]
-# Add windows specific modules.
-if sys.platform == "win32":
- _moved_attributes += [
- MovedModule("winreg", "_winreg"),
- ]
-
-for attr in _moved_attributes:
- setattr(_MovedItems, attr.name, attr)
- if isinstance(attr, MovedModule):
- _importer._add_module(attr, "moves." + attr.name)
-del attr
-
-_MovedItems._moved_attributes = _moved_attributes
-
-moves = _MovedItems(__name__ + ".moves")
-_importer._add_module(moves, "moves")
-
-
-class Module_six_moves_urllib_parse(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_parse"""
-
-
-_urllib_parse_moved_attributes = [
- MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
- MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
- MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
- MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
- MovedAttribute("urljoin", "urlparse", "urllib.parse"),
- MovedAttribute("urlparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
- MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
- MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
- MovedAttribute("quote", "urllib", "urllib.parse"),
- MovedAttribute("quote_plus", "urllib", "urllib.parse"),
- MovedAttribute("unquote", "urllib", "urllib.parse"),
- MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
- MovedAttribute("urlencode", "urllib", "urllib.parse"),
- MovedAttribute("splitquery", "urllib", "urllib.parse"),
- MovedAttribute("splittag", "urllib", "urllib.parse"),
- MovedAttribute("splituser", "urllib", "urllib.parse"),
- MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
- MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
- MovedAttribute("uses_params", "urlparse", "urllib.parse"),
- MovedAttribute("uses_query", "urlparse", "urllib.parse"),
- MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
-]
-for attr in _urllib_parse_moved_attributes:
- setattr(Module_six_moves_urllib_parse, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
- "moves.urllib_parse", "moves.urllib.parse")
-
-
-class Module_six_moves_urllib_error(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_error"""
-
-
-_urllib_error_moved_attributes = [
- MovedAttribute("URLError", "urllib2", "urllib.error"),
- MovedAttribute("HTTPError", "urllib2", "urllib.error"),
- MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
-]
-for attr in _urllib_error_moved_attributes:
- setattr(Module_six_moves_urllib_error, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
- "moves.urllib_error", "moves.urllib.error")
-
-
-class Module_six_moves_urllib_request(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_request"""
-
-
-_urllib_request_moved_attributes = [
- MovedAttribute("urlopen", "urllib2", "urllib.request"),
- MovedAttribute("install_opener", "urllib2", "urllib.request"),
- MovedAttribute("build_opener", "urllib2", "urllib.request"),
- MovedAttribute("pathname2url", "urllib", "urllib.request"),
- MovedAttribute("url2pathname", "urllib", "urllib.request"),
- MovedAttribute("getproxies", "urllib", "urllib.request"),
- MovedAttribute("Request", "urllib2", "urllib.request"),
- MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
- MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
- MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
- MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
- MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
- MovedAttribute("FileHandler", "urllib2", "urllib.request"),
- MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
- MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
- MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
- MovedAttribute("urlretrieve", "urllib", "urllib.request"),
- MovedAttribute("urlcleanup", "urllib", "urllib.request"),
- MovedAttribute("URLopener", "urllib", "urllib.request"),
- MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
- MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
-]
-for attr in _urllib_request_moved_attributes:
- setattr(Module_six_moves_urllib_request, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
- "moves.urllib_request", "moves.urllib.request")
-
-
-class Module_six_moves_urllib_response(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_response"""
-
-
-_urllib_response_moved_attributes = [
- MovedAttribute("addbase", "urllib", "urllib.response"),
- MovedAttribute("addclosehook", "urllib", "urllib.response"),
- MovedAttribute("addinfo", "urllib", "urllib.response"),
- MovedAttribute("addinfourl", "urllib", "urllib.response"),
-]
-for attr in _urllib_response_moved_attributes:
- setattr(Module_six_moves_urllib_response, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
- "moves.urllib_response", "moves.urllib.response")
-
-
-class Module_six_moves_urllib_robotparser(_LazyModule):
-
- """Lazy loading of moved objects in six.moves.urllib_robotparser"""
-
-
-_urllib_robotparser_moved_attributes = [
- MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
-]
-for attr in _urllib_robotparser_moved_attributes:
- setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
-del attr
-
-Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
-
-_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
- "moves.urllib_robotparser", "moves.urllib.robotparser")
-
-
-class Module_six_moves_urllib(types.ModuleType):
-
- """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
- __path__ = [] # mark as package
- parse = _importer._get_module("moves.urllib_parse")
- error = _importer._get_module("moves.urllib_error")
- request = _importer._get_module("moves.urllib_request")
- response = _importer._get_module("moves.urllib_response")
- robotparser = _importer._get_module("moves.urllib_robotparser")
-
- def __dir__(self):
- return ['parse', 'error', 'request', 'response', 'robotparser']
-
-_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
- "moves.urllib")
-
-
-def add_move(move):
- """Add an item to six.moves."""
- setattr(_MovedItems, move.name, move)
-
-
-def remove_move(name):
- """Remove item from six.moves."""
- try:
- delattr(_MovedItems, name)
- except AttributeError:
- try:
- del moves.__dict__[name]
- except KeyError:
- raise AttributeError("no such move, %r" % (name,))
-
-
-if PY3:
- _meth_func = "__func__"
- _meth_self = "__self__"
-
- _func_closure = "__closure__"
- _func_code = "__code__"
- _func_defaults = "__defaults__"
- _func_globals = "__globals__"
-else:
- _meth_func = "im_func"
- _meth_self = "im_self"
-
- _func_closure = "func_closure"
- _func_code = "func_code"
- _func_defaults = "func_defaults"
- _func_globals = "func_globals"
-
-
-try:
- advance_iterator = next
-except NameError:
- def advance_iterator(it):
- return it.next()
-next = advance_iterator
-
-
-try:
- callable = callable
-except NameError:
- def callable(obj):
- return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
-
-
-if PY3:
- def get_unbound_function(unbound):
- return unbound
-
- create_bound_method = types.MethodType
-
- def create_unbound_method(func, cls):
- return func
-
- Iterator = object
-else:
- def get_unbound_function(unbound):
- return unbound.im_func
-
- def create_bound_method(func, obj):
- return types.MethodType(func, obj, obj.__class__)
-
- def create_unbound_method(func, cls):
- return types.MethodType(func, None, cls)
-
- class Iterator(object):
-
- def next(self):
- return type(self).__next__(self)
-
- callable = callable
-_add_doc(get_unbound_function,
- """Get the function out of a possibly unbound function""")
-
-
-get_method_function = operator.attrgetter(_meth_func)
-get_method_self = operator.attrgetter(_meth_self)
-get_function_closure = operator.attrgetter(_func_closure)
-get_function_code = operator.attrgetter(_func_code)
-get_function_defaults = operator.attrgetter(_func_defaults)
-get_function_globals = operator.attrgetter(_func_globals)
-
-
-if PY3:
- def iterkeys(d, **kw):
- return iter(d.keys(**kw))
-
- def itervalues(d, **kw):
- return iter(d.values(**kw))
-
- def iteritems(d, **kw):
- return iter(d.items(**kw))
-
- def iterlists(d, **kw):
- return iter(d.lists(**kw))
-
- viewkeys = operator.methodcaller("keys")
-
- viewvalues = operator.methodcaller("values")
-
- viewitems = operator.methodcaller("items")
-else:
- def iterkeys(d, **kw):
- return d.iterkeys(**kw)
-
- def itervalues(d, **kw):
- return d.itervalues(**kw)
-
- def iteritems(d, **kw):
- return d.iteritems(**kw)
-
- def iterlists(d, **kw):
- return d.iterlists(**kw)
-
- viewkeys = operator.methodcaller("viewkeys")
-
- viewvalues = operator.methodcaller("viewvalues")
-
- viewitems = operator.methodcaller("viewitems")
-
-_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
-_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
-_add_doc(iteritems,
- "Return an iterator over the (key, value) pairs of a dictionary.")
-_add_doc(iterlists,
- "Return an iterator over the (key, [values]) pairs of a dictionary.")
-
-
-if PY3:
- def b(s):
- return s.encode("latin-1")
-
- def u(s):
- return s
- unichr = chr
- import struct
- int2byte = struct.Struct(">B").pack
- del struct
- byte2int = operator.itemgetter(0)
- indexbytes = operator.getitem
- iterbytes = iter
- import io
- StringIO = io.StringIO
- BytesIO = io.BytesIO
- _assertCountEqual = "assertCountEqual"
- if sys.version_info[1] <= 1:
- _assertRaisesRegex = "assertRaisesRegexp"
- _assertRegex = "assertRegexpMatches"
- else:
- _assertRaisesRegex = "assertRaisesRegex"
- _assertRegex = "assertRegex"
-else:
- def b(s):
- return s
- # Workaround for standalone backslash
-
- def u(s):
- return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
- unichr = unichr
- int2byte = chr
-
- def byte2int(bs):
- return ord(bs[0])
-
- def indexbytes(buf, i):
- return ord(buf[i])
- iterbytes = functools.partial(itertools.imap, ord)
- import StringIO
- StringIO = BytesIO = StringIO.StringIO
- _assertCountEqual = "assertItemsEqual"
- _assertRaisesRegex = "assertRaisesRegexp"
- _assertRegex = "assertRegexpMatches"
-_add_doc(b, """Byte literal""")
-_add_doc(u, """Text literal""")
-
-
-def assertCountEqual(self, *args, **kwargs):
- return getattr(self, _assertCountEqual)(*args, **kwargs)
-
-
-def assertRaisesRegex(self, *args, **kwargs):
- return getattr(self, _assertRaisesRegex)(*args, **kwargs)
-
-
-def assertRegex(self, *args, **kwargs):
- return getattr(self, _assertRegex)(*args, **kwargs)
-
-
-if PY3:
- exec_ = getattr(moves.builtins, "exec")
-
- def reraise(tp, value, tb=None):
- if value is None:
- value = tp()
- if value.__traceback__ is not tb:
- raise value.with_traceback(tb)
- raise value
-
-else:
- def exec_(_code_, _globs_=None, _locs_=None):
- """Execute code in a namespace."""
- if _globs_ is None:
- frame = sys._getframe(1)
- _globs_ = frame.f_globals
- if _locs_ is None:
- _locs_ = frame.f_locals
- del frame
- elif _locs_ is None:
- _locs_ = _globs_
- exec("""exec _code_ in _globs_, _locs_""")
-
- exec_("""def reraise(tp, value, tb=None):
- raise tp, value, tb
-""")
-
-
-if sys.version_info[:2] == (3, 2):
- exec_("""def raise_from(value, from_value):
- if from_value is None:
- raise value
- raise value from from_value
-""")
-elif sys.version_info[:2] > (3, 2):
- exec_("""def raise_from(value, from_value):
- raise value from from_value
-""")
-else:
- def raise_from(value, from_value):
- raise value
-
-
-print_ = getattr(moves.builtins, "print", None)
-if print_ is None:
- def print_(*args, **kwargs):
- """The new-style print function for Python 2.4 and 2.5."""
- fp = kwargs.pop("file", sys.stdout)
- if fp is None:
- return
-
- def write(data):
- if not isinstance(data, basestring):
- data = str(data)
- # If the file has an encoding, encode unicode with it.
- if (isinstance(fp, file) and
- isinstance(data, unicode) and
- fp.encoding is not None):
- errors = getattr(fp, "errors", None)
- if errors is None:
- errors = "strict"
- data = data.encode(fp.encoding, errors)
- fp.write(data)
- want_unicode = False
- sep = kwargs.pop("sep", None)
- if sep is not None:
- if isinstance(sep, unicode):
- want_unicode = True
- elif not isinstance(sep, str):
- raise TypeError("sep must be None or a string")
- end = kwargs.pop("end", None)
- if end is not None:
- if isinstance(end, unicode):
- want_unicode = True
- elif not isinstance(end, str):
- raise TypeError("end must be None or a string")
- if kwargs:
- raise TypeError("invalid keyword arguments to print()")
- if not want_unicode:
- for arg in args:
- if isinstance(arg, unicode):
- want_unicode = True
- break
- if want_unicode:
- newline = unicode("\n")
- space = unicode(" ")
- else:
- newline = "\n"
- space = " "
- if sep is None:
- sep = space
- if end is None:
- end = newline
- for i, arg in enumerate(args):
- if i:
- write(sep)
- write(arg)
- write(end)
-if sys.version_info[:2] < (3, 3):
- _print = print_
-
- def print_(*args, **kwargs):
- fp = kwargs.get("file", sys.stdout)
- flush = kwargs.pop("flush", False)
- _print(*args, **kwargs)
- if flush and fp is not None:
- fp.flush()
-
-_add_doc(reraise, """Reraise an exception.""")
-
-if sys.version_info[0:2] < (3, 4):
- def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
- updated=functools.WRAPPER_UPDATES):
- def wrapper(f):
- f = functools.wraps(wrapped, assigned, updated)(f)
- f.__wrapped__ = wrapped
- return f
- return wrapper
-else:
- wraps = functools.wraps
-
-
-def with_metaclass(meta, *bases):
- """Create a base class with a metaclass."""
- # This requires a bit of explanation: the basic idea is to make a dummy
- # metaclass for one level of class instantiation that replaces itself with
- # the actual metaclass.
- class metaclass(meta):
-
- def __new__(cls, name, this_bases, d):
- return meta(name, bases, d)
- return type.__new__(metaclass, 'temporary_class', (), {})
-
-
-def add_metaclass(metaclass):
- """Class decorator for creating a class with a metaclass."""
- def wrapper(cls):
- orig_vars = cls.__dict__.copy()
- slots = orig_vars.get('__slots__')
- if slots is not None:
- if isinstance(slots, str):
- slots = [slots]
- for slots_var in slots:
- orig_vars.pop(slots_var)
- orig_vars.pop('__dict__', None)
- orig_vars.pop('__weakref__', None)
- return metaclass(cls.__name__, cls.__bases__, orig_vars)
- return wrapper
-
-
-def python_2_unicode_compatible(klass):
- """
- A decorator that defines __unicode__ and __str__ methods under Python 2.
- Under Python 3 it does nothing.
-
- To support Python 2 and 3 with a single code base, define a __str__ method
- returning text and apply this decorator to the class.
- """
- if PY2:
- if '__str__' not in klass.__dict__:
- raise ValueError("@python_2_unicode_compatible cannot be applied "
- "to %s because it doesn't define __str__()." %
- klass.__name__)
- klass.__unicode__ = klass.__str__
- klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
- return klass
-
-
-# Complete the moves implementation.
-# This code is at the end of this module to speed up module loading.
-# Turn this module into a package.
-__path__ = [] # required for PEP 302 and PEP 451
-__package__ = __name__ # see PEP 366 @ReservedAssignment
-if globals().get("__spec__") is not None:
- __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
-# Remove other six meta path importers, since they cause problems. This can
-# happen if six is removed from sys.modules and then reloaded. (Setuptools does
-# this for some reason.)
-if sys.meta_path:
- for i, importer in enumerate(sys.meta_path):
- # Here's some real nastiness: Another "instance" of the six module might
- # be floating around. Therefore, we can't use isinstance() to check for
- # the six meta path importer, since the other six instance will have
- # inserted an importer with different class.
- if (type(importer).__name__ == "_SixMetaPathImporter" and
- importer.name == __name__):
- del sys.meta_path[i]
- break
- del i, importer
-# Finally, add the importer to the meta path import hook.
-sys.meta_path.append(_importer)
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
deleted file mode 100644
index 2aeeeff91..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/__init__.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-License-Identifier: MIT
-import sys
-
-try:
- # Our match_hostname function is the same as 3.5's, so we only want to
- # import the match_hostname function if it's at least that good.
- if sys.version_info < (3, 5):
- raise ImportError("Fallback to vendored code")
-
- from ssl import CertificateError, match_hostname
-except ImportError:
- try:
- # Backport of the function from a pypi module
- from backports.ssl_match_hostname import CertificateError, match_hostname
- except ImportError:
- # Our vendored copy
- from ._implementation import CertificateError, match_hostname
-
-# Not needed, but documenting what we provide.
-__all__ = ('CertificateError', 'match_hostname')
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py b/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
deleted file mode 100644
index 647e081da..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/packages/ssl_match_hostname/_implementation.py
+++ /dev/null
@@ -1,156 +0,0 @@
-"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
-
-# SPDX-License-Identifier: Python-2.0
-
-import re
-import sys
-
-# ipaddress has been backported to 2.6+ in pypi. If it is installed on the
-# system, use it to handle IPAddress ServerAltnames (this was added in
-# python-3.5) otherwise only do DNS matching. This allows
-# backports.ssl_match_hostname to continue to be used all the way back to
-# python-2.4.
-try:
- import ipaddress
-except ImportError:
- ipaddress = None
-
-__version__ = '3.5.0.1'
-
-
-class CertificateError(ValueError):
- pass
-
-
-def _dnsname_match(dn, hostname, max_wildcards=1):
- """Matching according to RFC 6125, section 6.4.3
-
- http://tools.ietf.org/html/rfc6125#section-6.4.3
- """
- pats = []
- if not dn:
- return False
-
- # Ported from python3-syntax:
- # leftmost, *remainder = dn.split(r'.')
- parts = dn.split(r'.')
- leftmost = parts[0]
- remainder = parts[1:]
-
- wildcards = leftmost.count('*')
- if wildcards > max_wildcards:
- # Issue #17980: avoid denials of service by refusing more
- # than one wildcard per fragment. A survey of established
- # policy among SSL implementations showed it to be a
- # reasonable choice.
- raise CertificateError(
- "too many wildcards in certificate DNS name: " + repr(dn))
-
- # speed up common case w/o wildcards
- if not wildcards:
- return dn.lower() == hostname.lower()
-
- # RFC 6125, section 6.4.3, subitem 1.
- # The client SHOULD NOT attempt to match a presented identifier in which
- # the wildcard character comprises a label other than the left-most label.
- if leftmost == '*':
- # When '*' is a fragment by itself, it matches a non-empty dotless
- # fragment.
- pats.append('[^.]+')
- elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
- # RFC 6125, section 6.4.3, subitem 3.
- # The client SHOULD NOT attempt to match a presented identifier
- # where the wildcard character is embedded within an A-label or
- # U-label of an internationalized domain name.
- pats.append(re.escape(leftmost))
- else:
- # Otherwise, '*' matches any dotless string, e.g. www*
- pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
-
- # add the remaining fragments, ignore any wildcards
- for frag in remainder:
- pats.append(re.escape(frag))
-
- pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
- return pat.match(hostname)
-
-
-def _to_unicode(obj):
- if isinstance(obj, str) and sys.version_info < (3,):
- obj = unicode(obj, encoding='ascii', errors='strict')
- return obj
-
-def _ipaddress_match(ipname, host_ip):
- """Exact matching of IP addresses.
-
- RFC 6125 explicitly doesn't define an algorithm for this
- (section 1.7.2 - "Out of Scope").
- """
- # OpenSSL may add a trailing newline to a subjectAltName's IP address
- # Divergence from upstream: ipaddress can't handle byte str
- ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
- return ip == host_ip
-
-
-def match_hostname(cert, hostname):
- """Verify that *cert* (in decoded format as returned by
- SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
- rules are followed, but IP addresses are not accepted for *hostname*.
-
- CertificateError is raised on failure. On success, the function
- returns nothing.
- """
- if not cert:
- raise ValueError("empty or no certificate, match_hostname needs a "
- "SSL socket or SSL context with either "
- "CERT_OPTIONAL or CERT_REQUIRED")
- try:
- # Divergence from upstream: ipaddress can't handle byte str
- host_ip = ipaddress.ip_address(_to_unicode(hostname))
- except ValueError:
- # Not an IP address (common case)
- host_ip = None
- except UnicodeError:
- # Divergence from upstream: Have to deal with ipaddress not taking
- # byte strings. addresses should be all ascii, so we consider it not
- # an ipaddress in this case
- host_ip = None
- except AttributeError:
- # Divergence from upstream: Make ipaddress library optional
- if ipaddress is None:
- host_ip = None
- else:
- raise
- dnsnames = []
- san = cert.get('subjectAltName', ())
- for key, value in san:
- if key == 'DNS':
- if host_ip is None and _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- elif key == 'IP Address':
- if host_ip is not None and _ipaddress_match(value, host_ip):
- return
- dnsnames.append(value)
- if not dnsnames:
- # The subject is only checked when there is no dNSName entry
- # in subjectAltName
- for sub in cert.get('subject', ()):
- for key, value in sub:
- # XXX according to RFC 2818, the most specific Common Name
- # must be used.
- if key == 'commonName':
- if _dnsname_match(value, hostname):
- return
- dnsnames.append(value)
- if len(dnsnames) > 1:
- raise CertificateError("hostname %r "
- "doesn't match either of %s"
- % (hostname, ', '.join(map(repr, dnsnames))))
- elif len(dnsnames) == 1:
- raise CertificateError("hostname %r "
- "doesn't match %r"
- % (hostname, dnsnames[0]))
- else:
- raise CertificateError("no appropriate commonName or "
- "subjectAltName fields were found")
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py b/src/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
deleted file mode 100644
index adea9bc01..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/poolmanager.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import collections
-import functools
-import logging
-
-from ._collections import RecentlyUsedContainer
-from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool
-from .connectionpool import port_by_scheme
-from .exceptions import LocationValueError, MaxRetryError, ProxySchemeUnknown
-from .packages.six.moves.urllib.parse import urljoin
-from .request import RequestMethods
-from .util.url import parse_url
-from .util.retry import Retry
-
-
-__all__ = ['PoolManager', 'ProxyManager', 'proxy_from_url']
-
-
-log = logging.getLogger(__name__)
-
-SSL_KEYWORDS = ('key_file', 'cert_file', 'cert_reqs', 'ca_certs',
- 'ssl_version', 'ca_cert_dir', 'ssl_context')
-
-# All known keyword arguments that could be provided to the pool manager, its
-# pools, or the underlying connections. This is used to construct a pool key.
-_key_fields = (
- 'key_scheme', # str
- 'key_host', # str
- 'key_port', # int
- 'key_timeout', # int or float or Timeout
- 'key_retries', # int or Retry
- 'key_strict', # bool
- 'key_block', # bool
- 'key_source_address', # str
- 'key_key_file', # str
- 'key_cert_file', # str
- 'key_cert_reqs', # str
- 'key_ca_certs', # str
- 'key_ssl_version', # str
- 'key_ca_cert_dir', # str
- 'key_ssl_context', # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
- 'key_maxsize', # int
- 'key_headers', # dict
- 'key__proxy', # parsed proxy url
- 'key__proxy_headers', # dict
- 'key_socket_options', # list of (level (int), optname (int), value (int or str)) tuples
- 'key__socks_options', # dict
- 'key_assert_hostname', # bool or string
- 'key_assert_fingerprint', # str
-)
-
-#: The namedtuple class used to construct keys for the connection pool.
-#: All custom key schemes should include the fields in this key at a minimum.
-PoolKey = collections.namedtuple('PoolKey', _key_fields)
-
-
-def _default_key_normalizer(key_class, request_context):
- """
- Create a pool key out of a request context dictionary.
-
- According to RFC 3986, both the scheme and host are case-insensitive.
- Therefore, this function normalizes both before constructing the pool
- key for an HTTPS request. If you wish to change this behaviour, provide
- alternate callables to ``key_fn_by_scheme``.
-
- :param key_class:
- The class to use when constructing the key. This should be a namedtuple
- with the ``scheme`` and ``host`` keys at a minimum.
- :type key_class: namedtuple
- :param request_context:
- A dictionary-like object that contain the context for a request.
- :type request_context: dict
-
- :return: A namedtuple that can be used as a connection pool key.
- :rtype: PoolKey
- """
- # Since we mutate the dictionary, make a copy first
- context = request_context.copy()
- context['scheme'] = context['scheme'].lower()
- context['host'] = context['host'].lower()
-
- # These are both dictionaries and need to be transformed into frozensets
- for key in ('headers', '_proxy_headers', '_socks_options'):
- if key in context and context[key] is not None:
- context[key] = frozenset(context[key].items())
-
- # The socket_options key may be a list and needs to be transformed into a
- # tuple.
- socket_opts = context.get('socket_options')
- if socket_opts is not None:
- context['socket_options'] = tuple(socket_opts)
-
- # Map the kwargs to the names in the namedtuple - this is necessary since
- # namedtuples can't have fields starting with '_'.
- for key in list(context.keys()):
- context['key_' + key] = context.pop(key)
-
- # Default to ``None`` for keys missing from the context
- for field in key_class._fields:
- if field not in context:
- context[field] = None
-
- return key_class(**context)
-
-
-#: A dictionary that maps a scheme to a callable that creates a pool key.
-#: This can be used to alter the way pool keys are constructed, if desired.
-#: Each PoolManager makes a copy of this dictionary so they can be configured
-#: globally here, or individually on the instance.
-key_fn_by_scheme = {
- 'http': functools.partial(_default_key_normalizer, PoolKey),
- 'https': functools.partial(_default_key_normalizer, PoolKey),
-}
-
-pool_classes_by_scheme = {
- 'http': HTTPConnectionPool,
- 'https': HTTPSConnectionPool,
-}
-
-
-class PoolManager(RequestMethods):
- """
- Allows for arbitrary requests while transparently keeping track of
- necessary connection pools for you.
-
- :param num_pools:
- Number of connection pools to cache before discarding the least
- recently used pool.
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
-
- :param \\**connection_pool_kw:
- Additional parameters are used to create fresh
- :class:`urllib3.connectionpool.ConnectionPool` instances.
-
- Example::
-
- >>> manager = PoolManager(num_pools=2)
- >>> r = manager.request('GET', 'http://google.com/')
- >>> r = manager.request('GET', 'http://google.com/mail')
- >>> r = manager.request('GET', 'http://yahoo.com/')
- >>> len(manager.pools)
- 2
-
- """
-
- proxy = None
-
- def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
- RequestMethods.__init__(self, headers)
- self.connection_pool_kw = connection_pool_kw
- self.pools = RecentlyUsedContainer(num_pools,
- dispose_func=lambda p: p.close())
-
- # Locally set the pool classes and keys so other PoolManagers can
- # override them.
- self.pool_classes_by_scheme = pool_classes_by_scheme
- self.key_fn_by_scheme = key_fn_by_scheme.copy()
-
- def __enter__(self):
- return self
-
- def __exit__(self, exc_type, exc_val, exc_tb):
- self.clear()
- # Return False to re-raise any potential exceptions
- return False
-
- def _new_pool(self, scheme, host, port, request_context=None):
- """
- Create a new :class:`ConnectionPool` based on host, port, scheme, and
- any additional pool keyword arguments.
-
- If ``request_context`` is provided, it is provided as keyword arguments
- to the pool class used. This method is used to actually create the
- connection pools handed out by :meth:`connection_from_url` and
- companion methods. It is intended to be overridden for customization.
- """
- pool_cls = self.pool_classes_by_scheme[scheme]
- if request_context is None:
- request_context = self.connection_pool_kw.copy()
-
- # Although the context has everything necessary to create the pool,
- # this function has historically only used the scheme, host, and port
- # in the positional args. When an API change is acceptable these can
- # be removed.
- for key in ('scheme', 'host', 'port'):
- request_context.pop(key, None)
-
- if scheme == 'http':
- for kw in SSL_KEYWORDS:
- request_context.pop(kw, None)
-
- return pool_cls(host, port, **request_context)
-
- def clear(self):
- """
- Empty our store of pools and direct them all to close.
-
- This will not affect in-flight connections, but they will not be
- re-used after completion.
- """
- self.pools.clear()
-
- def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
- """
- Get a :class:`ConnectionPool` based on the host, port, and scheme.
-
- If ``port`` isn't given, it will be derived from the ``scheme`` using
- ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
- provided, it is merged with the instance's ``connection_pool_kw``
- variable and used to create the new connection pool, if one is
- needed.
- """
-
- if not host:
- raise LocationValueError("No host specified.")
-
- request_context = self._merge_pool_kwargs(pool_kwargs)
- request_context['scheme'] = scheme or 'http'
- if not port:
- port = port_by_scheme.get(request_context['scheme'].lower(), 80)
- request_context['port'] = port
- request_context['host'] = host
-
- return self.connection_from_context(request_context)
-
- def connection_from_context(self, request_context):
- """
- Get a :class:`ConnectionPool` based on the request context.
-
- ``request_context`` must at least contain the ``scheme`` key and its
- value must be a key in ``key_fn_by_scheme`` instance variable.
- """
- scheme = request_context['scheme'].lower()
- pool_key_constructor = self.key_fn_by_scheme[scheme]
- pool_key = pool_key_constructor(request_context)
-
- return self.connection_from_pool_key(pool_key, request_context=request_context)
-
- def connection_from_pool_key(self, pool_key, request_context=None):
- """
- Get a :class:`ConnectionPool` based on the provided pool key.
-
- ``pool_key`` should be a namedtuple that only contains immutable
- objects. At a minimum it must have the ``scheme``, ``host``, and
- ``port`` fields.
- """
- with self.pools.lock:
- # If the scheme, host, or port doesn't match existing open
- # connections, open a new ConnectionPool.
- pool = self.pools.get(pool_key)
- if pool:
- return pool
-
- # Make a fresh ConnectionPool of the desired type
- scheme = request_context['scheme']
- host = request_context['host']
- port = request_context['port']
- pool = self._new_pool(scheme, host, port, request_context=request_context)
- self.pools[pool_key] = pool
-
- return pool
-
- def connection_from_url(self, url, pool_kwargs=None):
- """
- Similar to :func:`urllib3.connectionpool.connection_from_url`.
-
- If ``pool_kwargs`` is not provided and a new pool needs to be
- constructed, ``self.connection_pool_kw`` is used to initialize
- the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
- is provided, it is used instead. Note that if a new pool does not
- need to be created for the request, the provided ``pool_kwargs`` are
- not used.
- """
- u = parse_url(url)
- return self.connection_from_host(u.host, port=u.port, scheme=u.scheme,
- pool_kwargs=pool_kwargs)
-
- def _merge_pool_kwargs(self, override):
- """
- Merge a dictionary of override values for self.connection_pool_kw.
-
- This does not modify self.connection_pool_kw and returns a new dict.
- Any keys in the override dictionary with a value of ``None`` are
- removed from the merged dictionary.
- """
- base_pool_kwargs = self.connection_pool_kw.copy()
- if override:
- for key, value in override.items():
- if value is None:
- try:
- del base_pool_kwargs[key]
- except KeyError:
- pass
- else:
- base_pool_kwargs[key] = value
- return base_pool_kwargs
-
- def urlopen(self, method, url, redirect=True, **kw):
- """
- Same as :meth:`urllib3.connectionpool.HTTPConnectionPool.urlopen`
- with custom cross-host redirect logic and only sends the request-uri
- portion of the ``url``.
-
- The given ``url`` parameter must be absolute, such that an appropriate
- :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
- """
- u = parse_url(url)
- conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
-
- kw['assert_same_host'] = False
- kw['redirect'] = False
- if 'headers' not in kw:
- kw['headers'] = self.headers
-
- if self.proxy is not None and u.scheme == "http":
- response = conn.urlopen(method, url, **kw)
- else:
- response = conn.urlopen(method, u.request_uri, **kw)
-
- redirect_location = redirect and response.get_redirect_location()
- if not redirect_location:
- return response
-
- # Support relative URLs for redirecting.
- redirect_location = urljoin(url, redirect_location)
-
- # RFC 7231, Section 6.4.4
- if response.status == 303:
- method = 'GET'
-
- retries = kw.get('retries')
- if not isinstance(retries, Retry):
- retries = Retry.from_int(retries, redirect=redirect)
-
- try:
- retries = retries.increment(method, url, response=response, _pool=conn)
- except MaxRetryError:
- if retries.raise_on_redirect:
- raise
- return response
-
- kw['retries'] = retries
- kw['redirect'] = redirect
-
- log.info("Redirecting %s -> %s", url, redirect_location)
- return self.urlopen(method, redirect_location, **kw)
-
-
-class ProxyManager(PoolManager):
- """
- Behaves just like :class:`PoolManager`, but sends all requests through
- the defined proxy, using the CONNECT method for HTTPS URLs.
-
- :param proxy_url:
- The URL of the proxy to be used.
-
- :param proxy_headers:
- A dictionary contaning headers that will be sent to the proxy. In case
- of HTTP they are being sent with each request, while in the
- HTTPS/CONNECT case they are sent only once. Could be used for proxy
- authentication.
-
- Example:
- >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
- >>> r1 = proxy.request('GET', 'http://google.com/')
- >>> r2 = proxy.request('GET', 'http://httpbin.org/')
- >>> len(proxy.pools)
- 1
- >>> r3 = proxy.request('GET', 'https://httpbin.org/')
- >>> r4 = proxy.request('GET', 'https://twitter.com/')
- >>> len(proxy.pools)
- 3
-
- """
-
- def __init__(self, proxy_url, num_pools=10, headers=None,
- proxy_headers=None, **connection_pool_kw):
-
- if isinstance(proxy_url, HTTPConnectionPool):
- proxy_url = '%s://%s:%i' % (proxy_url.scheme, proxy_url.host,
- proxy_url.port)
- proxy = parse_url(proxy_url)
- if not proxy.port:
- port = port_by_scheme.get(proxy.scheme, 80)
- proxy = proxy._replace(port=port)
-
- if proxy.scheme not in ("http", "https"):
- raise ProxySchemeUnknown(proxy.scheme)
-
- self.proxy = proxy
- self.proxy_headers = proxy_headers or {}
-
- connection_pool_kw['_proxy'] = self.proxy
- connection_pool_kw['_proxy_headers'] = self.proxy_headers
-
- super(ProxyManager, self).__init__(
- num_pools, headers, **connection_pool_kw)
-
- def connection_from_host(self, host, port=None, scheme='http', pool_kwargs=None):
- if scheme == "https":
- return super(ProxyManager, self).connection_from_host(
- host, port, scheme, pool_kwargs=pool_kwargs)
-
- return super(ProxyManager, self).connection_from_host(
- self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs)
-
- def _set_proxy_headers(self, url, headers=None):
- """
- Sets headers needed by proxies: specifically, the Accept and Host
- headers. Only sets headers not provided by the user.
- """
- headers_ = {'Accept': '*/*'}
-
- netloc = parse_url(url).netloc
- if netloc:
- headers_['Host'] = netloc
-
- if headers:
- headers_.update(headers)
- return headers_
-
- def urlopen(self, method, url, redirect=True, **kw):
- "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
- u = parse_url(url)
-
- if u.scheme == "http":
- # For proxied HTTPS requests, httplib sets the necessary headers
- # on the CONNECT to the proxy. For HTTP, we'll definitely
- # need to set 'Host' at the very least.
- headers = kw.get('headers', self.headers)
- kw['headers'] = self._set_proxy_headers(url, headers)
-
- return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
-
-
-def proxy_from_url(url, **kw):
- return ProxyManager(proxy_url=url, **kw)
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/request.py b/src/collectors/python.d.plugin/python_modules/urllib3/request.py
deleted file mode 100644
index f78331975..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/request.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-
-from .filepost import encode_multipart_formdata
-from .packages.six.moves.urllib.parse import urlencode
-
-
-__all__ = ['RequestMethods']
-
-
-class RequestMethods(object):
- """
- Convenience mixin for classes who implement a :meth:`urlopen` method, such
- as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
- :class:`~urllib3.poolmanager.PoolManager`.
-
- Provides behavior for making common types of HTTP request methods and
- decides which type of request field encoding to use.
-
- Specifically,
-
- :meth:`.request_encode_url` is for sending requests whose fields are
- encoded in the URL (such as GET, HEAD, DELETE).
-
- :meth:`.request_encode_body` is for sending requests whose fields are
- encoded in the *body* of the request using multipart or www-form-urlencoded
- (such as for POST, PUT, PATCH).
-
- :meth:`.request` is for making any kind of request, it will look up the
- appropriate encoding format and use one of the above two methods to make
- the request.
-
- Initializer parameters:
-
- :param headers:
- Headers to include with all requests, unless other headers are given
- explicitly.
- """
-
- _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
-
- def __init__(self, headers=None):
- self.headers = headers or {}
-
- def urlopen(self, method, url, body=None, headers=None,
- encode_multipart=True, multipart_boundary=None,
- **kw): # Abstract
- raise NotImplemented("Classes extending RequestMethods must implement "
- "their own ``urlopen`` method.")
-
- def request(self, method, url, fields=None, headers=None, **urlopen_kw):
- """
- Make a request using :meth:`urlopen` with the appropriate encoding of
- ``fields`` based on the ``method`` used.
-
- This is a convenience method that requires the least amount of manual
- effort. It can be used in most situations, while still having the
- option to drop down to more specific methods when necessary, such as
- :meth:`request_encode_url`, :meth:`request_encode_body`,
- or even the lowest level :meth:`urlopen`.
- """
- method = method.upper()
-
- if method in self._encode_url_methods:
- return self.request_encode_url(method, url, fields=fields,
- headers=headers,
- **urlopen_kw)
- else:
- return self.request_encode_body(method, url, fields=fields,
- headers=headers,
- **urlopen_kw)
-
- def request_encode_url(self, method, url, fields=None, headers=None,
- **urlopen_kw):
- """
- Make a request using :meth:`urlopen` with the ``fields`` encoded in
- the url. This is useful for request methods like GET, HEAD, DELETE, etc.
- """
- if headers is None:
- headers = self.headers
-
- extra_kw = {'headers': headers}
- extra_kw.update(urlopen_kw)
-
- if fields:
- url += '?' + urlencode(fields)
-
- return self.urlopen(method, url, **extra_kw)
-
- def request_encode_body(self, method, url, fields=None, headers=None,
- encode_multipart=True, multipart_boundary=None,
- **urlopen_kw):
- """
- Make a request using :meth:`urlopen` with the ``fields`` encoded in
- the body. This is useful for request methods like POST, PUT, PATCH, etc.
-
- When ``encode_multipart=True`` (default), then
- :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
- the payload with the appropriate content type. Otherwise
- :meth:`urllib.urlencode` is used with the
- 'application/x-www-form-urlencoded' content type.
-
- Multipart encoding must be used when posting files, and it's reasonably
- safe to use it at other times too. However, it may break request
- signing, such as with OAuth.
-
- Supports an optional ``fields`` parameter of key/value strings AND
- key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
- the MIME type is optional. For example::
-
- fields = {
- 'foo': 'bar',
- 'fakefile': ('foofile.txt', 'contents of foofile'),
- 'realfile': ('barfile.txt', open('realfile').read()),
- 'typedfile': ('bazfile.bin', open('bazfile').read(),
- 'image/jpeg'),
- 'nonamefile': 'contents of nonamefile field',
- }
-
- When uploading a file, providing a filename (the first parameter of the
- tuple) is optional but recommended to best mimic the behavior of browsers.
-
- Note that if ``headers`` are supplied, the 'Content-Type' header will
- be overwritten because it depends on the dynamic random boundary string
- which is used to compose the body of the request. The random boundary
- string can be explicitly set with the ``multipart_boundary`` parameter.
- """
- if headers is None:
- headers = self.headers
-
- extra_kw = {'headers': {}}
-
- if fields:
- if 'body' in urlopen_kw:
- raise TypeError(
- "request got values for both 'fields' and 'body', can only specify one.")
-
- if encode_multipart:
- body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
- else:
- body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
-
- extra_kw['body'] = body
- extra_kw['headers'] = {'Content-Type': content_type}
-
- extra_kw['headers'].update(headers)
- extra_kw.update(urlopen_kw)
-
- return self.urlopen(method, url, **extra_kw)
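
The RequestMethods mixin deleted above is what gives PoolManager its high-level request() helper, picking query-string encoding for GET/HEAD/DELETE/OPTIONS and body encoding for everything else. A minimal sketch of that behavior, assuming a standalone urllib3 1.x installation (the URLs are placeholders):

    import urllib3

    http = urllib3.PoolManager()  # PoolManager mixes in RequestMethods

    # GET/HEAD/DELETE/OPTIONS: fields are URL-encoded into the query string.
    r1 = http.request('GET', 'http://example.com/', fields={'q': 'netdata'})

    # POST/PUT/PATCH: fields are encoded into the request body
    # (multipart/form-data by default).
    r2 = http.request('POST', 'http://example.com/submit', fields={'name': 'value'})

    print(r1.status, r2.status)
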
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/response.py b/src/collectors/python.d.plugin/python_modules/urllib3/response.py
deleted file mode 100644
index cf14a3076..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/response.py
+++ /dev/null
@@ -1,623 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from contextlib import contextmanager
-import zlib
-import io
-import logging
-from socket import timeout as SocketTimeout
-from socket import error as SocketError
-
-from ._collections import HTTPHeaderDict
-from .exceptions import (
- BodyNotHttplibCompatible, ProtocolError, DecodeError, ReadTimeoutError,
- ResponseNotChunked, IncompleteRead, InvalidHeader
-)
-from .packages.six import string_types as basestring, binary_type, PY3
-from .packages.six.moves import http_client as httplib
-from .connection import HTTPException, BaseSSLError
-from .util.response import is_fp_closed, is_response_to_head
-
-log = logging.getLogger(__name__)
-
-
-class DeflateDecoder(object):
-
- def __init__(self):
- self._first_try = True
- self._data = binary_type()
- self._obj = zlib.decompressobj()
-
- def __getattr__(self, name):
- return getattr(self._obj, name)
-
- def decompress(self, data):
- if not data:
- return data
-
- if not self._first_try:
- return self._obj.decompress(data)
-
- self._data += data
- try:
- decompressed = self._obj.decompress(data)
- if decompressed:
- self._first_try = False
- self._data = None
- return decompressed
- except zlib.error:
- self._first_try = False
- self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
- try:
- return self.decompress(self._data)
- finally:
- self._data = None
-
-
-class GzipDecoder(object):
-
- def __init__(self):
- self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
-
- def __getattr__(self, name):
- return getattr(self._obj, name)
-
- def decompress(self, data):
- if not data:
- return data
- return self._obj.decompress(data)
-
-
-def _get_decoder(mode):
- if mode == 'gzip':
- return GzipDecoder()
-
- return DeflateDecoder()
-
-
-class HTTPResponse(io.IOBase):
- """
- HTTP Response container.
-
- Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
- loaded and decoded on-demand when the ``data`` property is accessed. This
- class is also compatible with the Python standard library's :mod:`io`
- module, and can hence be treated as a readable object in the context of that
- framework.
-
- Extra parameters for behaviour not present in httplib.HTTPResponse:
-
- :param preload_content:
- If True, the response's body will be preloaded during construction.
-
- :param decode_content:
- If True, the body will be decoded based on the 'content-encoding'
- header (such as 'gzip' and 'deflate'); if False, the raw, undecoded
- data is returned instead.
-
- :param original_response:
- When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
- object, it's convenient to include the original for debug purposes. It's
- otherwise unused.
-
- :param retries:
- The retries contains the last :class:`~urllib3.util.retry.Retry` that
- was used during the request.
-
- :param enforce_content_length:
- Enforce content length checking. Body returned by server must match
- value of Content-Length header, if present. Otherwise, raise error.
- """
-
- CONTENT_DECODERS = ['gzip', 'deflate']
- REDIRECT_STATUSES = [301, 302, 303, 307, 308]
-
- def __init__(self, body='', headers=None, status=0, version=0, reason=None,
- strict=0, preload_content=True, decode_content=True,
- original_response=None, pool=None, connection=None,
- retries=None, enforce_content_length=False, request_method=None):
-
- if isinstance(headers, HTTPHeaderDict):
- self.headers = headers
- else:
- self.headers = HTTPHeaderDict(headers)
- self.status = status
- self.version = version
- self.reason = reason
- self.strict = strict
- self.decode_content = decode_content
- self.retries = retries
- self.enforce_content_length = enforce_content_length
-
- self._decoder = None
- self._body = None
- self._fp = None
- self._original_response = original_response
- self._fp_bytes_read = 0
-
- if body and isinstance(body, (basestring, binary_type)):
- self._body = body
-
- self._pool = pool
- self._connection = connection
-
- if hasattr(body, 'read'):
- self._fp = body
-
- # Are we using the chunked-style of transfer encoding?
- self.chunked = False
- self.chunk_left = None
- tr_enc = self.headers.get('transfer-encoding', '').lower()
- # Don't incur the penalty of creating a list and then discarding it
- encodings = (enc.strip() for enc in tr_enc.split(","))
- if "chunked" in encodings:
- self.chunked = True
-
- # Determine length of response
- self.length_remaining = self._init_length(request_method)
-
- # If requested, preload the body.
- if preload_content and not self._body:
- self._body = self.read(decode_content=decode_content)
-
- def get_redirect_location(self):
- """
- Should we redirect and where to?
-
- :returns: Truthy redirect location string if we got a redirect status
- code and valid location. ``None`` if redirect status and no
- location. ``False`` if not a redirect status code.
- """
- if self.status in self.REDIRECT_STATUSES:
- return self.headers.get('location')
-
- return False
-
- def release_conn(self):
- if not self._pool or not self._connection:
- return
-
- self._pool._put_conn(self._connection)
- self._connection = None
-
- @property
- def data(self):
- # For backwards-compat with urllib3 0.4 and earlier.
- if self._body:
- return self._body
-
- if self._fp:
- return self.read(cache_content=True)
-
- @property
- def connection(self):
- return self._connection
-
- def tell(self):
- """
- Obtain the number of bytes pulled over the wire so far. May differ from
- the amount of content returned by :meth:`HTTPResponse.read` if bytes
- are encoded on the wire (e.g., compressed).
- """
- return self._fp_bytes_read
-
- def _init_length(self, request_method):
- """
- Set initial length value for Response content if available.
- """
- length = self.headers.get('content-length')
-
- if length is not None and self.chunked:
- # This Response will fail with an IncompleteRead if it can't be
- # received as chunked. This method falls back to attempt reading
- # the response before raising an exception.
- log.warning("Received response with both Content-Length and "
- "Transfer-Encoding set. This is expressly forbidden "
- "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
- "attempting to process response as Transfer-Encoding: "
- "chunked.")
- return None
-
- elif length is not None:
- try:
- # RFC 7230 section 3.3.2 specifies multiple content lengths can
- # be sent in a single Content-Length header
- # (e.g. Content-Length: 42, 42). This line ensures the values
- # are all valid ints and that as long as the `set` length is 1,
- # all values are the same. Otherwise, the header is invalid.
- lengths = set([int(val) for val in length.split(',')])
- if len(lengths) > 1:
- raise InvalidHeader("Content-Length contained multiple "
- "unmatching values (%s)" % length)
- length = lengths.pop()
- except ValueError:
- length = None
- else:
- if length < 0:
- length = None
-
- # Convert status to int for comparison
- # In some cases, httplib returns a status of "_UNKNOWN"
- try:
- status = int(self.status)
- except ValueError:
- status = 0
-
- # Check for responses that shouldn't include a body
- if status in (204, 304) or 100 <= status < 200 or request_method == 'HEAD':
- length = 0
-
- return length
-
- def _init_decoder(self):
- """
- Set-up the _decoder attribute if necessary.
- """
- # Note: content-encoding value should be case-insensitive, per RFC 7230
- # Section 3.2
- content_encoding = self.headers.get('content-encoding', '').lower()
- if self._decoder is None and content_encoding in self.CONTENT_DECODERS:
- self._decoder = _get_decoder(content_encoding)
-
- def _decode(self, data, decode_content, flush_decoder):
- """
- Decode the data passed in and potentially flush the decoder.
- """
- try:
- if decode_content and self._decoder:
- data = self._decoder.decompress(data)
- except (IOError, zlib.error) as e:
- content_encoding = self.headers.get('content-encoding', '').lower()
- raise DecodeError(
- "Received response with content-encoding: %s, but "
- "failed to decode it." % content_encoding, e)
-
- if flush_decoder and decode_content:
- data += self._flush_decoder()
-
- return data
-
- def _flush_decoder(self):
- """
- Flushes the decoder. Should only be called if the decoder is actually
- being used.
- """
- if self._decoder:
- buf = self._decoder.decompress(b'')
- return buf + self._decoder.flush()
-
- return b''
-
- @contextmanager
- def _error_catcher(self):
- """
- Catch low-level python exceptions, instead re-raising urllib3
- variants, so that low-level exceptions are not leaked in the
- high-level api.
-
- On exit, release the connection back to the pool.
- """
- clean_exit = False
-
- try:
- try:
- yield
-
- except SocketTimeout:
- # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
- # there is yet no clean way to get at it from this context.
- raise ReadTimeoutError(self._pool, None, 'Read timed out.')
-
- except BaseSSLError as e:
- # FIXME: Is there a better way to differentiate between SSLErrors?
- if 'read operation timed out' not in str(e): # Defensive:
- # This shouldn't happen but just in case we're missing an edge
- # case, let's avoid swallowing SSL errors.
- raise
-
- raise ReadTimeoutError(self._pool, None, 'Read timed out.')
-
- except (HTTPException, SocketError) as e:
- # This includes IncompleteRead.
- raise ProtocolError('Connection broken: %r' % e, e)
-
- # If no exception is thrown, we should avoid cleaning up
- # unnecessarily.
- clean_exit = True
- finally:
- # If we didn't terminate cleanly, we need to throw away our
- # connection.
- if not clean_exit:
- # The response may not be closed but we're not going to use it
- # anymore so close it now to ensure that the connection is
- # released back to the pool.
- if self._original_response:
- self._original_response.close()
-
- # Closing the response may not actually be sufficient to close
- # everything, so if we have a hold of the connection close that
- # too.
- if self._connection:
- self._connection.close()
-
- # If we hold the original response but it's closed now, we should
- # return the connection back to the pool.
- if self._original_response and self._original_response.isclosed():
- self.release_conn()
-
- def read(self, amt=None, decode_content=None, cache_content=False):
- """
- Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
- parameters: ``decode_content`` and ``cache_content``.
-
- :param amt:
- How much of the content to read. If specified, caching is skipped
- because it doesn't make sense to cache partial content as the full
- response.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header.
-
- :param cache_content:
- If True, will save the returned data such that the same result is
- returned regardless of the state of the underlying file object. This
- is useful if you want the ``.data`` property to continue working
- after having ``.read()`` the file object. (Overridden if ``amt`` is
- set.)
- """
- self._init_decoder()
- if decode_content is None:
- decode_content = self.decode_content
-
- if self._fp is None:
- return
-
- flush_decoder = False
- data = None
-
- with self._error_catcher():
- if amt is None:
- # cStringIO doesn't like amt=None
- data = self._fp.read()
- flush_decoder = True
- else:
- cache_content = False
- data = self._fp.read(amt)
- if amt != 0 and not data: # Platform-specific: Buggy versions of Python.
- # Close the connection when no data is returned
- #
- # This is redundant to what httplib/http.client _should_
- # already do. However, versions of python released before
- # December 15, 2012 (http://bugs.python.org/issue16298) do
- # not properly close the connection in all cases. There is
- # no harm in redundantly calling close.
- self._fp.close()
- flush_decoder = True
- if self.enforce_content_length and self.length_remaining not in (0, None):
- # This is an edge case that httplib failed to cover due
- # to concerns of backward compatibility. We're
- # addressing it here to make sure IncompleteRead is
- # raised during streaming, so all calls with incorrect
- # Content-Length are caught.
- raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
-
- if data:
- self._fp_bytes_read += len(data)
- if self.length_remaining is not None:
- self.length_remaining -= len(data)
-
- data = self._decode(data, decode_content, flush_decoder)
-
- if cache_content:
- self._body = data
-
- return data
-
- def stream(self, amt=2**16, decode_content=None):
- """
- A generator wrapper for the read() method. A call will block until
- ``amt`` bytes have been read from the connection or until the
- connection is closed.
-
- :param amt:
- How much of the content to read. The generator will return up to
- that much data per iteration, but may return less. This is particularly
- likely when using compressed data. However, the empty string will
- never be returned.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header.
- """
- if self.chunked and self.supports_chunked_reads():
- for line in self.read_chunked(amt, decode_content=decode_content):
- yield line
- else:
- while not is_fp_closed(self._fp):
- data = self.read(amt=amt, decode_content=decode_content)
-
- if data:
- yield data
-
- @classmethod
- def from_httplib(ResponseCls, r, **response_kw):
- """
- Given an :class:`httplib.HTTPResponse` instance ``r``, return a
- corresponding :class:`urllib3.response.HTTPResponse` object.
-
- Remaining parameters are passed to the HTTPResponse constructor, along
- with ``original_response=r``.
- """
- headers = r.msg
-
- if not isinstance(headers, HTTPHeaderDict):
- if PY3: # Python 3
- headers = HTTPHeaderDict(headers.items())
- else: # Python 2
- headers = HTTPHeaderDict.from_httplib(headers)
-
- # HTTPResponse objects in Python 3 don't have a .strict attribute
- strict = getattr(r, 'strict', 0)
- resp = ResponseCls(body=r,
- headers=headers,
- status=r.status,
- version=r.version,
- reason=r.reason,
- strict=strict,
- original_response=r,
- **response_kw)
- return resp
-
- # Backwards-compatibility methods for httplib.HTTPResponse
- def getheaders(self):
- return self.headers
-
- def getheader(self, name, default=None):
- return self.headers.get(name, default)
-
- # Overrides from io.IOBase
- def close(self):
- if not self.closed:
- self._fp.close()
-
- if self._connection:
- self._connection.close()
-
- @property
- def closed(self):
- if self._fp is None:
- return True
- elif hasattr(self._fp, 'isclosed'):
- return self._fp.isclosed()
- elif hasattr(self._fp, 'closed'):
- return self._fp.closed
- else:
- return True
-
- def fileno(self):
- if self._fp is None:
- raise IOError("HTTPResponse has no file to get a fileno from")
- elif hasattr(self._fp, "fileno"):
- return self._fp.fileno()
- else:
- raise IOError("The file-like object this HTTPResponse is wrapped "
- "around has no file descriptor")
-
- def flush(self):
- if self._fp is not None and hasattr(self._fp, 'flush'):
- return self._fp.flush()
-
- def readable(self):
- # This method is required for `io` module compatibility.
- return True
-
- def readinto(self, b):
- # This method is required for `io` module compatibility.
- temp = self.read(len(b))
- if len(temp) == 0:
- return 0
- else:
- b[:len(temp)] = temp
- return len(temp)
-
- def supports_chunked_reads(self):
- """
- Checks if the underlying file-like object looks like a
- httplib.HTTPResponse object. We do this by testing for the fp
- attribute. If it is present we assume it returns raw chunks as
- processed by read_chunked().
- """
- return hasattr(self._fp, 'fp')
-
- def _update_chunk_length(self):
- # First, we'll figure out length of a chunk and then
- # we'll try to read it from socket.
- if self.chunk_left is not None:
- return
- line = self._fp.fp.readline()
- line = line.split(b';', 1)[0]
- try:
- self.chunk_left = int(line, 16)
- except ValueError:
- # Invalid chunked protocol response, abort.
- self.close()
- raise httplib.IncompleteRead(line)
-
- def _handle_chunk(self, amt):
- returned_chunk = None
- if amt is None:
- chunk = self._fp._safe_read(self.chunk_left)
- returned_chunk = chunk
- self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
- self.chunk_left = None
- elif amt < self.chunk_left:
- value = self._fp._safe_read(amt)
- self.chunk_left = self.chunk_left - amt
- returned_chunk = value
- elif amt == self.chunk_left:
- value = self._fp._safe_read(amt)
- self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
- self.chunk_left = None
- returned_chunk = value
- else: # amt > self.chunk_left
- returned_chunk = self._fp._safe_read(self.chunk_left)
- self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
- self.chunk_left = None
- return returned_chunk
-
- def read_chunked(self, amt=None, decode_content=None):
- """
- Similar to :meth:`HTTPResponse.read`, but with an additional
- parameter: ``decode_content``.
-
- :param decode_content:
- If True, will attempt to decode the body based on the
- 'content-encoding' header.
- """
- self._init_decoder()
- # FIXME: Rewrite this method and make it a class with a better structured logic.
- if not self.chunked:
- raise ResponseNotChunked(
- "Response is not chunked. "
- "Header 'transfer-encoding: chunked' is missing.")
- if not self.supports_chunked_reads():
- raise BodyNotHttplibCompatible(
- "Body should be httplib.HTTPResponse like. "
- "It should have have an fp attribute which returns raw chunks.")
-
- # Don't bother reading the body of a HEAD request.
- if self._original_response and is_response_to_head(self._original_response):
- self._original_response.close()
- return
-
- with self._error_catcher():
- while True:
- self._update_chunk_length()
- if self.chunk_left == 0:
- break
- chunk = self._handle_chunk(amt)
- decoded = self._decode(chunk, decode_content=decode_content,
- flush_decoder=False)
- if decoded:
- yield decoded
-
- if decode_content:
- # On CPython and PyPy, we should never need to flush the
- # decoder. However, on Jython we *might* need to, so
- # let's defensively do it anyway.
- decoded = self._flush_decoder()
- if decoded: # Platform-specific: Jython.
- yield decoded
-
- # Chunk content ends with \r\n: discard it.
- while True:
- line = self._fp.fp.readline()
- if not line:
- # Some sites may not end with '\r\n'.
- break
- if line == b'\r\n':
- break
-
- # We read everything; close the "file".
- if self._original_response:
- self._original_response.close()
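
HTTPResponse, deleted above, is urllib3's lazily-decoded body container: the body is only read when .data is accessed or .read()/.stream() is called, and content-encoding decoding happens on the way out. A small usage sketch of the preload_content / stream() / release_conn() cycle its docstrings describe, assuming a standalone urllib3 1.x installation (the URL is a placeholder):

    import urllib3

    http = urllib3.PoolManager()
    # preload_content=False defers reading the body so it can be streamed.
    resp = http.request('GET', 'http://example.com/big-file', preload_content=False)

    total = 0
    for chunk in resp.stream(2 ** 14, decode_content=True):
        total += len(chunk)       # chunks arrive already gzip/deflate-decoded

    resp.release_conn()           # hand the socket back to the pool
    print(resp.status, total)
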
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
deleted file mode 100644
index bba628d98..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/__init__.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-# For backwards compatibility, provide imports that used to be here.
-from .connection import is_connection_dropped
-from .request import make_headers
-from .response import is_fp_closed
-from .ssl_ import (
- SSLContext,
- HAS_SNI,
- IS_PYOPENSSL,
- IS_SECURETRANSPORT,
- assert_fingerprint,
- resolve_cert_reqs,
- resolve_ssl_version,
- ssl_wrap_socket,
-)
-from .timeout import (
- current_time,
- Timeout,
-)
-
-from .retry import Retry
-from .url import (
- get_host,
- parse_url,
- split_first,
- Url,
-)
-from .wait import (
- wait_for_read,
- wait_for_write
-)
-
-__all__ = (
- 'HAS_SNI',
- 'IS_PYOPENSSL',
- 'IS_SECURETRANSPORT',
- 'SSLContext',
- 'Retry',
- 'Timeout',
- 'Url',
- 'assert_fingerprint',
- 'current_time',
- 'is_connection_dropped',
- 'is_fp_closed',
- 'get_host',
- 'parse_url',
- 'make_headers',
- 'resolve_cert_reqs',
- 'resolve_ssl_version',
- 'split_first',
- 'ssl_wrap_socket',
- 'wait_for_read',
- 'wait_for_write'
-)
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/connection.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
deleted file mode 100644
index 3bd69e8fa..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/connection.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import socket
-from .wait import wait_for_read
-from .selectors import HAS_SELECT, SelectorError
-
-
-def is_connection_dropped(conn): # Platform-specific
- """
- Returns True if the connection is dropped and should be closed.
-
- :param conn:
- :class:`httplib.HTTPConnection` object.
-
- Note: For platforms like AppEngine, this will always return ``False`` to
- let the platform handle connection recycling transparently for us.
- """
- sock = getattr(conn, 'sock', False)
- if sock is False: # Platform-specific: AppEngine
- return False
- if sock is None: # Connection already closed (such as by httplib).
- return True
-
- if not HAS_SELECT:
- return False
-
- try:
- return bool(wait_for_read(sock, timeout=0.0))
- except SelectorError:
- return True
-
-
-# This function is copied from socket.py in the Python 2.7 standard
-# library test suite. Added to its signature is only `socket_options`.
-# One additional modification is that we avoid binding to IPv6 servers
-# discovered in DNS if the system doesn't have IPv6 functionality.
-def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
- source_address=None, socket_options=None):
- """Connect to *address* and return the socket object.
-
- Convenience function. Connect to *address* (a 2-tuple ``(host,
- port)``) and return the socket object. Passing the optional
- *timeout* parameter will set the timeout on the socket instance
- before attempting to connect. If no *timeout* is supplied, the
- global default timeout setting returned by :func:`getdefaulttimeout`
- is used. If *source_address* is set it must be a tuple of (host, port)
- for the socket to bind as a source address before making the connection.
- An host of '' or port 0 tells the OS to use the default.
- """
-
- host, port = address
- if host.startswith('['):
- host = host.strip('[]')
- err = None
-
- # Using the value from allowed_gai_family() in the context of getaddrinfo lets
- # us select whether to work with IPv4 DNS records, IPv6 records, or both.
- # The original create_connection function always returns all records.
- family = allowed_gai_family()
-
- for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
- af, socktype, proto, canonname, sa = res
- sock = None
- try:
- sock = socket.socket(af, socktype, proto)
-
- # If provided, set socket level options before connecting.
- _set_socket_options(sock, socket_options)
-
- if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
- sock.settimeout(timeout)
- if source_address:
- sock.bind(source_address)
- sock.connect(sa)
- return sock
-
- except socket.error as e:
- err = e
- if sock is not None:
- sock.close()
- sock = None
-
- if err is not None:
- raise err
-
- raise socket.error("getaddrinfo returns an empty list")
-
-
-def _set_socket_options(sock, options):
- if options is None:
- return
-
- for opt in options:
- sock.setsockopt(*opt)
-
-
-def allowed_gai_family():
- """This function is designed to work in the context of
- getaddrinfo, where family=socket.AF_UNSPEC is the default and
- will perform a DNS search for both IPv6 and IPv4 records."""
-
- family = socket.AF_INET
- if HAS_IPV6:
- family = socket.AF_UNSPEC
- return family
-
-
-def _has_ipv6(host):
- """ Returns True if the system can bind an IPv6 address. """
- sock = None
- has_ipv6 = False
-
- if socket.has_ipv6:
- # has_ipv6 returns true if cPython was compiled with IPv6 support.
- # It does not tell us if the system has IPv6 support enabled. To
- # determine that we must bind to an IPv6 address.
- # https://github.com/shazow/urllib3/pull/611
- # https://bugs.python.org/issue658327
- try:
- sock = socket.socket(socket.AF_INET6)
- sock.bind((host, 0))
- has_ipv6 = True
- except Exception:
- pass
-
- if sock:
- sock.close()
- return has_ipv6
-
-
-HAS_IPV6 = _has_ipv6('::1')
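
The create_connection() helper above resolves the host via allowed_gai_family(), applies any socket_options before connecting, and can bind a source address. A sketch of how a caller would pass socket options through it (needs network access; the host is a placeholder):

    import socket
    from urllib3.util.connection import create_connection

    # Each tuple is forwarded to sock.setsockopt() before connect().
    sock = create_connection(
        ('example.com', 80),
        timeout=5.0,
        socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
    )
    sock.close()
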
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/request.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/request.py
deleted file mode 100644
index 18f27b032..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/request.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from base64 import b64encode
-
-from ..packages.six import b, integer_types
-from ..exceptions import UnrewindableBodyError
-
-ACCEPT_ENCODING = 'gzip,deflate'
-_FAILEDTELL = object()
-
-
-def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
- basic_auth=None, proxy_basic_auth=None, disable_cache=None):
- """
- Shortcuts for generating request headers.
-
- :param keep_alive:
- If ``True``, adds 'connection: keep-alive' header.
-
- :param accept_encoding:
- Can be a boolean, list, or string.
- ``True`` translates to 'gzip,deflate'.
- List will get joined by comma.
- String will be used as provided.
-
- :param user_agent:
- String representing the user-agent you want, such as
- "python-urllib3/0.6"
-
- :param basic_auth:
- Colon-separated username:password string for 'authorization: basic ...'
- auth header.
-
- :param proxy_basic_auth:
- Colon-separated username:password string for 'proxy-authorization: basic ...'
- auth header.
-
- :param disable_cache:
- If ``True``, adds 'cache-control: no-cache' header.
-
- Example::
-
- >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
- {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
- >>> make_headers(accept_encoding=True)
- {'accept-encoding': 'gzip,deflate'}
- """
- headers = {}
- if accept_encoding:
- if isinstance(accept_encoding, str):
- pass
- elif isinstance(accept_encoding, list):
- accept_encoding = ','.join(accept_encoding)
- else:
- accept_encoding = ACCEPT_ENCODING
- headers['accept-encoding'] = accept_encoding
-
- if user_agent:
- headers['user-agent'] = user_agent
-
- if keep_alive:
- headers['connection'] = 'keep-alive'
-
- if basic_auth:
- headers['authorization'] = 'Basic ' + \
- b64encode(b(basic_auth)).decode('utf-8')
-
- if proxy_basic_auth:
- headers['proxy-authorization'] = 'Basic ' + \
- b64encode(b(proxy_basic_auth)).decode('utf-8')
-
- if disable_cache:
- headers['cache-control'] = 'no-cache'
-
- return headers
-
-
-def set_file_position(body, pos):
- """
- If a position is provided, move file to that point.
- Otherwise, we'll attempt to record a position for future use.
- """
- if pos is not None:
- rewind_body(body, pos)
- elif getattr(body, 'tell', None) is not None:
- try:
- pos = body.tell()
- except (IOError, OSError):
- # This differentiates from None, allowing us to catch
- # a failed `tell()` later when trying to rewind the body.
- pos = _FAILEDTELL
-
- return pos
-
-
-def rewind_body(body, body_pos):
- """
- Attempt to rewind body to a certain position.
- Primarily used for request redirects and retries.
-
- :param body:
- File-like object that supports seek.
-
- :param int pos:
- Position to seek to in file.
- """
- body_seek = getattr(body, 'seek', None)
- if body_seek is not None and isinstance(body_pos, integer_types):
- try:
- body_seek(body_pos)
- except (IOError, OSError):
- raise UnrewindableBodyError("An error occurred when rewinding request "
- "body for redirect/retry.")
- elif body_pos is _FAILEDTELL:
- raise UnrewindableBodyError("Unable to record file position for rewinding "
- "request body during a redirect/retry.")
- else:
- raise ValueError("body_pos must be of type integer, "
- "instead it was %s." % type(body_pos))
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/response.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/response.py
deleted file mode 100644
index e4cda93d4..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/response.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from ..packages.six.moves import http_client as httplib
-
-from ..exceptions import HeaderParsingError
-
-
-def is_fp_closed(obj):
- """
- Checks whether a given file-like object is closed.
-
- :param obj:
- The file-like object to check.
- """
-
- try:
- # Check `isclosed()` first, in case Python3 doesn't set `closed`.
- # GH Issue #928
- return obj.isclosed()
- except AttributeError:
- pass
-
- try:
- # Check via the official file-like-object way.
- return obj.closed
- except AttributeError:
- pass
-
- try:
- # Check if the object is a container for another file-like object that
- # gets released on exhaustion (e.g. HTTPResponse).
- return obj.fp is None
- except AttributeError:
- pass
-
- raise ValueError("Unable to determine whether fp is closed.")
-
-
-def assert_header_parsing(headers):
- """
- Asserts whether all headers have been successfully parsed.
- Extracts encountered errors from the result of parsing headers.
-
- Only works on Python 3.
-
- :param headers: Headers to verify.
- :type headers: `httplib.HTTPMessage`.
-
- :raises urllib3.exceptions.HeaderParsingError:
- If parsing errors are found.
- """
-
- # This will fail silently if we pass in the wrong kind of parameter.
- # To make debugging easier add an explicit check.
- if not isinstance(headers, httplib.HTTPMessage):
- raise TypeError('expected httplib.Message, got {0}.'.format(
- type(headers)))
-
- defects = getattr(headers, 'defects', None)
- get_payload = getattr(headers, 'get_payload', None)
-
- unparsed_data = None
- if get_payload: # Platform-specific: Python 3.
- unparsed_data = get_payload()
-
- if defects or unparsed_data:
- raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
-
-
-def is_response_to_head(response):
- """
- Checks whether the response was produced by a HEAD request.
- Handles the quirks of AppEngine.
-
- :param response:
- :type response: :class:`httplib.HTTPResponse`
- """
- # FIXME: Can we do this somehow without accessing private httplib _method?
- method = response._method
- if isinstance(method, int): # Platform-specific: Appengine
- return method == 3
- return method.upper() == 'HEAD'
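
is_fp_closed() above only needs one of isclosed()/closed/fp to exist on the wrapped object, so it also works on ordinary in-memory streams, not just httplib responses. A tiny sketch:

    import io
    from urllib3.util import is_fp_closed

    buf = io.BytesIO(b'payload')
    print(is_fp_closed(buf))   # False: falls through to the 'closed' attribute
    buf.close()
    print(is_fp_closed(buf))   # True
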
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/retry.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
deleted file mode 100644
index 61e63afec..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/retry.py
+++ /dev/null
@@ -1,402 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import time
-import logging
-from collections import namedtuple
-from itertools import takewhile
-import email
-import re
-
-from ..exceptions import (
- ConnectTimeoutError,
- MaxRetryError,
- ProtocolError,
- ReadTimeoutError,
- ResponseError,
- InvalidHeader,
-)
-from ..packages import six
-
-
-log = logging.getLogger(__name__)
-
-# Data structure for representing the metadata of requests that result in a retry.
-RequestHistory = namedtuple('RequestHistory', ["method", "url", "error",
- "status", "redirect_location"])
-
-
-class Retry(object):
- """ Retry configuration.
-
- Each retry attempt will create a new Retry object with updated values, so
- they can be safely reused.
-
- Retries can be defined as a default for a pool::
-
- retries = Retry(connect=5, read=2, redirect=5)
- http = PoolManager(retries=retries)
- response = http.request('GET', 'http://example.com/')
-
- Or per-request (which overrides the default for the pool)::
-
- response = http.request('GET', 'http://example.com/', retries=Retry(10))
-
- Retries can be disabled by passing ``False``::
-
- response = http.request('GET', 'http://example.com/', retries=False)
-
- Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
- retries are disabled, in which case the causing exception will be raised.
-
- :param int total:
- Total number of retries to allow. Takes precedence over other counts.
-
- Set to ``None`` to remove this constraint and fall back on other
- counts. It's a good idea to set this to some sensibly-high value to
- account for unexpected edge cases and avoid infinite retry loops.
-
- Set to ``0`` to fail on the first retry.
-
- Set to ``False`` to disable and imply ``raise_on_redirect=False``.
-
- :param int connect:
- How many connection-related errors to retry on.
-
- These are errors raised before the request is sent to the remote server,
- which we assume has not triggered the server to process the request.
-
- Set to ``0`` to fail on the first retry of this type.
-
- :param int read:
- How many times to retry on read errors.
-
- These errors are raised after the request was sent to the server, so the
- request may have side-effects.
-
- Set to ``0`` to fail on the first retry of this type.
-
- :param int redirect:
- How many redirects to perform. Limit this to avoid infinite redirect
- loops.
-
- A redirect is an HTTP response with a status code of 301, 302, 303, 307 or
- 308.
-
- Set to ``0`` to fail on the first retry of this type.
-
- Set to ``False`` to disable and imply ``raise_on_redirect=False``.
-
- :param int status:
- How many times to retry on bad status codes.
-
- These are retries made on responses, where status code matches
- ``status_forcelist``.
-
- Set to ``0`` to fail on the first retry of this type.
-
- :param iterable method_whitelist:
- Set of uppercased HTTP method verbs that we should retry on.
-
- By default, we only retry on methods which are considered to be
- idempotent (multiple requests with the same parameters end with the
- same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
-
- Set to a ``False`` value to retry on any verb.
-
- :param iterable status_forcelist:
- A set of integer HTTP status codes that we should force a retry on.
- A retry is initiated if the request method is in ``method_whitelist``
- and the response status code is in ``status_forcelist``.
-
- By default, this is disabled with ``None``.
-
- :param float backoff_factor:
- A backoff factor to apply between attempts after the second try
- (most errors are resolved immediately by a second try without a
- delay). urllib3 will sleep for::
-
- {backoff factor} * (2 ^ ({number of total retries} - 1))
-
- seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
- for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
- than :attr:`Retry.BACKOFF_MAX`.
-
- By default, backoff is disabled (set to 0).
-
- :param bool raise_on_redirect: Whether, if the number of redirects is
- exhausted, to raise a MaxRetryError, or to return a response with a
- response code in the 3xx range.
-
- :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
- whether we should raise an exception, or return a response,
- if status falls in ``status_forcelist`` range and retries have
- been exhausted.
-
- :param tuple history: The history of the request encountered during
- each call to :meth:`~Retry.increment`. The list is in the order
- the requests occurred. Each list item is of class :class:`RequestHistory`.
-
- :param bool respect_retry_after_header:
- Whether to respect Retry-After header on status codes defined as
- :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
-
- """
-
- DEFAULT_METHOD_WHITELIST = frozenset([
- 'HEAD', 'GET', 'PUT', 'DELETE', 'OPTIONS', 'TRACE'])
-
- RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
-
- #: Maximum backoff time.
- BACKOFF_MAX = 120
-
- def __init__(self, total=10, connect=None, read=None, redirect=None, status=None,
- method_whitelist=DEFAULT_METHOD_WHITELIST, status_forcelist=None,
- backoff_factor=0, raise_on_redirect=True, raise_on_status=True,
- history=None, respect_retry_after_header=True):
-
- self.total = total
- self.connect = connect
- self.read = read
- self.status = status
-
- if redirect is False or total is False:
- redirect = 0
- raise_on_redirect = False
-
- self.redirect = redirect
- self.status_forcelist = status_forcelist or set()
- self.method_whitelist = method_whitelist
- self.backoff_factor = backoff_factor
- self.raise_on_redirect = raise_on_redirect
- self.raise_on_status = raise_on_status
- self.history = history or tuple()
- self.respect_retry_after_header = respect_retry_after_header
-
- def new(self, **kw):
- params = dict(
- total=self.total,
- connect=self.connect, read=self.read, redirect=self.redirect, status=self.status,
- method_whitelist=self.method_whitelist,
- status_forcelist=self.status_forcelist,
- backoff_factor=self.backoff_factor,
- raise_on_redirect=self.raise_on_redirect,
- raise_on_status=self.raise_on_status,
- history=self.history,
- )
- params.update(kw)
- return type(self)(**params)
-
- @classmethod
- def from_int(cls, retries, redirect=True, default=None):
- """ Backwards-compatibility for the old retries format."""
- if retries is None:
- retries = default if default is not None else cls.DEFAULT
-
- if isinstance(retries, Retry):
- return retries
-
- redirect = bool(redirect) and None
- new_retries = cls(retries, redirect=redirect)
- log.debug("Converted retries value: %r -> %r", retries, new_retries)
- return new_retries
-
- def get_backoff_time(self):
- """ Formula for computing the current backoff
-
- :rtype: float
- """
- # We want to consider only the last consecutive errors sequence (Ignore redirects).
- consecutive_errors_len = len(list(takewhile(lambda x: x.redirect_location is None,
- reversed(self.history))))
- if consecutive_errors_len <= 1:
- return 0
-
- backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
- return min(self.BACKOFF_MAX, backoff_value)
-
- def parse_retry_after(self, retry_after):
- # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
- if re.match(r"^\s*[0-9]+\s*$", retry_after):
- seconds = int(retry_after)
- else:
- retry_date_tuple = email.utils.parsedate(retry_after)
- if retry_date_tuple is None:
- raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
- retry_date = time.mktime(retry_date_tuple)
- seconds = retry_date - time.time()
-
- if seconds < 0:
- seconds = 0
-
- return seconds
-
- def get_retry_after(self, response):
- """ Get the value of Retry-After in seconds. """
-
- retry_after = response.getheader("Retry-After")
-
- if retry_after is None:
- return None
-
- return self.parse_retry_after(retry_after)
-
- def sleep_for_retry(self, response=None):
- retry_after = self.get_retry_after(response)
- if retry_after:
- time.sleep(retry_after)
- return True
-
- return False
-
- def _sleep_backoff(self):
- backoff = self.get_backoff_time()
- if backoff <= 0:
- return
- time.sleep(backoff)
-
- def sleep(self, response=None):
- """ Sleep between retry attempts.
-
- This method will respect a server's ``Retry-After`` response header
- and sleep the duration of the time requested. If that is not present, it
- will use an exponential backoff. By default, the backoff factor is 0 and
- this method will return immediately.
- """
-
- if response:
- slept = self.sleep_for_retry(response)
- if slept:
- return
-
- self._sleep_backoff()
-
- def _is_connection_error(self, err):
- """ Errors when we're fairly sure that the server did not receive the
- request, so it should be safe to retry.
- """
- return isinstance(err, ConnectTimeoutError)
-
- def _is_read_error(self, err):
- """ Errors that occur after the request has been started, so we should
- assume that the server began processing it.
- """
- return isinstance(err, (ReadTimeoutError, ProtocolError))
-
- def _is_method_retryable(self, method):
- """ Checks if a given HTTP method should be retried upon, depending if
- it is included on the method whitelist.
- """
- if self.method_whitelist and method.upper() not in self.method_whitelist:
- return False
-
- return True
-
- def is_retry(self, method, status_code, has_retry_after=False):
- """ Is this method/status code retryable? (Based on whitelists and control
- variables such as the number of total retries to allow, whether to
- respect the Retry-After header, whether this header is present, and
- whether the returned status code is on the list of status codes to
- be retried upon in the presence of the aforementioned header)
- """
- if not self._is_method_retryable(method):
- return False
-
- if self.status_forcelist and status_code in self.status_forcelist:
- return True
-
- return (self.total and self.respect_retry_after_header and
- has_retry_after and (status_code in self.RETRY_AFTER_STATUS_CODES))
-
- def is_exhausted(self):
- """ Are we out of retries? """
- retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
- retry_counts = list(filter(None, retry_counts))
- if not retry_counts:
- return False
-
- return min(retry_counts) < 0
-
- def increment(self, method=None, url=None, response=None, error=None,
- _pool=None, _stacktrace=None):
- """ Return a new Retry object with incremented retry counters.
-
- :param response: A response object, or None, if the server did not
- return a response.
- :type response: :class:`~urllib3.response.HTTPResponse`
- :param Exception error: An error encountered during the request, or
- None if the response was received successfully.
-
- :return: A new ``Retry`` object.
- """
- if self.total is False and error:
- # Disabled, indicate to re-raise the error.
- raise six.reraise(type(error), error, _stacktrace)
-
- total = self.total
- if total is not None:
- total -= 1
-
- connect = self.connect
- read = self.read
- redirect = self.redirect
- status_count = self.status
- cause = 'unknown'
- status = None
- redirect_location = None
-
- if error and self._is_connection_error(error):
- # Connect retry?
- if connect is False:
- raise six.reraise(type(error), error, _stacktrace)
- elif connect is not None:
- connect -= 1
-
- elif error and self._is_read_error(error):
- # Read retry?
- if read is False or not self._is_method_retryable(method):
- raise six.reraise(type(error), error, _stacktrace)
- elif read is not None:
- read -= 1
-
- elif response and response.get_redirect_location():
- # Redirect retry?
- if redirect is not None:
- redirect -= 1
- cause = 'too many redirects'
- redirect_location = response.get_redirect_location()
- status = response.status
-
- else:
- # Incrementing because of a server error like a 500 in
- # status_forcelist and the given method is in the whitelist
- cause = ResponseError.GENERIC_ERROR
- if response and response.status:
- if status_count is not None:
- status_count -= 1
- cause = ResponseError.SPECIFIC_ERROR.format(
- status_code=response.status)
- status = response.status
-
- history = self.history + (RequestHistory(method, url, error, status, redirect_location),)
-
- new_retry = self.new(
- total=total,
- connect=connect, read=read, redirect=redirect, status=status_count,
- history=history)
-
- if new_retry.is_exhausted():
- raise MaxRetryError(_pool, url, error or ResponseError(cause))
-
- log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
-
- return new_retry
-
- def __repr__(self):
- return ('{cls.__name__}(total={self.total}, connect={self.connect}, '
- 'read={self.read}, redirect={self.redirect}, status={self.status})').format(
- cls=type(self), self=self)
-
-
-# For backwards compatibility (equivalent to pre-v1.9):
-Retry.DEFAULT = Retry(3)
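
The Retry object removed above is a value object: every increment() returns a new copy, and the backoff schedule follows {backoff factor} * (2 ** (retries - 1)) seconds, capped at BACKOFF_MAX. A configuration sketch, assuming a standalone urllib3 1.x installation (the URL is a placeholder):

    import urllib3
    from urllib3.util.retry import Retry

    # Retry 500/502/503 responses, at most 5 attempts overall, sleeping
    # 0.0s, 0.2s, 0.4s, 0.8s between consecutive failed attempts.
    retry = Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503])
    http = urllib3.PoolManager(retries=retry)

    resp = http.request('GET', 'http://example.com/flaky-endpoint')
    print(resp.status)
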
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
deleted file mode 100644
index de5e49838..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/selectors.py
+++ /dev/null
@@ -1,588 +0,0 @@
-# SPDX-License-Identifier: MIT
-# Backport of selectors.py from Python 3.5+ to support Python < 3.4
-# Also has the behavior specified in PEP 475 which is to retry syscalls
-# in the case of an EINTR error. This module is required because selectors34
- # does not follow this behavior and instead reports that no file descriptor
-# events have occurred rather than retry the syscall. The decision to drop
-# support for select.devpoll is made to maintain 100% test coverage.
-
-import errno
-import math
-import select
-import socket
-import sys
-import time
-
-from collections import namedtuple
-
-try:
- from collections import Mapping
-except ImportError:
- from collections.abc import Mapping
-
-try:
- monotonic = time.monotonic
-except (AttributeError, ImportError): # Python < 3.3
- monotonic = time.time
-
-EVENT_READ = (1 << 0)
-EVENT_WRITE = (1 << 1)
-
-HAS_SELECT = True # Variable that shows whether the platform has a selector.
-_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
-_DEFAULT_SELECTOR = None
-
-
-class SelectorError(Exception):
- def __init__(self, errcode):
- super(SelectorError, self).__init__()
- self.errno = errcode
-
- def __repr__(self):
- return "<SelectorError errno={0}>".format(self.errno)
-
- def __str__(self):
- return self.__repr__()
-
-
-def _fileobj_to_fd(fileobj):
- """ Return a file descriptor from a file object. If
- given an integer will simply return that integer back. """
- if isinstance(fileobj, int):
- fd = fileobj
- else:
- try:
- fd = int(fileobj.fileno())
- except (AttributeError, TypeError, ValueError):
- raise ValueError("Invalid file object: {0!r}".format(fileobj))
- if fd < 0:
- raise ValueError("Invalid file descriptor: {0}".format(fd))
- return fd
-
-
-# Determine which function to use to wrap system calls because Python 3.5+
-# already handles the case when system calls are interrupted.
-if sys.version_info >= (3, 5):
- def _syscall_wrapper(func, _, *args, **kwargs):
- """ This is the short-circuit version of the below logic
- because in Python 3.5+ all system calls automatically restart
- and recalculate their timeouts. """
- try:
- return func(*args, **kwargs)
- except (OSError, IOError, select.error) as e:
- errcode = None
- if hasattr(e, "errno"):
- errcode = e.errno
- raise SelectorError(errcode)
-else:
- def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
- """ Wrapper function for syscalls that could fail due to EINTR.
- All functions should be retried if there is time left in the timeout
- in accordance with PEP 475. """
- timeout = kwargs.get("timeout", None)
- if timeout is None:
- expires = None
- recalc_timeout = False
- else:
- timeout = float(timeout)
- if timeout < 0.0: # Timeout less than 0 treated as no timeout.
- expires = None
- else:
- expires = monotonic() + timeout
-
- args = list(args)
- if recalc_timeout and "timeout" not in kwargs:
- raise ValueError(
- "Timeout must be in args or kwargs to be recalculated")
-
- result = _SYSCALL_SENTINEL
- while result is _SYSCALL_SENTINEL:
- try:
- result = func(*args, **kwargs)
- # OSError is thrown by select.select
- # IOError is thrown by select.epoll.poll
- # select.error is thrown by select.poll.poll
- # Aren't we thankful for Python 3.x rework for exceptions?
- except (OSError, IOError, select.error) as e:
- # select.error wasn't a subclass of OSError in the past.
- errcode = None
- if hasattr(e, "errno"):
- errcode = e.errno
- elif hasattr(e, "args"):
- errcode = e.args[0]
-
- # Also test for the Windows equivalent of EINTR.
- is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
- errcode == errno.WSAEINTR))
-
- if is_interrupt:
- if expires is not None:
- current_time = monotonic()
- if current_time > expires:
- raise OSError(errno.ETIMEDOUT, "Connection timed out")
- if recalc_timeout:
- if "timeout" in kwargs:
- kwargs["timeout"] = expires - current_time
- continue
- if errcode:
- raise SelectorError(errcode)
- else:
- raise
- return result
-
-
-SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
-
-
-class _SelectorMapping(Mapping):
- """ Mapping of file objects to selector keys """
-
- def __init__(self, selector):
- self._selector = selector
-
- def __len__(self):
- return len(self._selector._fd_to_key)
-
- def __getitem__(self, fileobj):
- try:
- fd = self._selector._fileobj_lookup(fileobj)
- return self._selector._fd_to_key[fd]
- except KeyError:
- raise KeyError("{0!r} is not registered.".format(fileobj))
-
- def __iter__(self):
- return iter(self._selector._fd_to_key)
-
-
-class BaseSelector(object):
- """ Abstract Selector class
-
- A selector supports registering file objects to be monitored
- for specific I/O events.
-
- A file object is a file descriptor or any object with a
- `fileno()` method. An arbitrary object can be attached to the
- file object which can be used for example to store context info,
- a callback, etc.
-
- A selector can use various implementations (select(), poll(), epoll(),
- and kqueue()) depending on the platform. The 'DefaultSelector' class uses
- the most efficient implementation for the current platform.
- """
- def __init__(self):
- # Maps file descriptors to keys.
- self._fd_to_key = {}
-
- # Read-only mapping returned by get_map()
- self._map = _SelectorMapping(self)
-
- def _fileobj_lookup(self, fileobj):
- """ Return a file descriptor from a file object.
- This wraps _fileobj_to_fd() to do an exhaustive
- search in case the object is invalid but we still
- have it in our map. Used by unregister() so we can
- unregister an object that was previously registered
- even if it is closed. It is also used by _SelectorMapping
- """
- try:
- return _fileobj_to_fd(fileobj)
- except ValueError:
-
- # Search through all our mapped keys.
- for key in self._fd_to_key.values():
- if key.fileobj is fileobj:
- return key.fd
-
- # Raise ValueError after all.
- raise
-
- def register(self, fileobj, events, data=None):
- """ Register a file object for a set of events to monitor. """
- if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
- raise ValueError("Invalid events: {0!r}".format(events))
-
- key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
-
- if key.fd in self._fd_to_key:
- raise KeyError("{0!r} (FD {1}) is already registered"
- .format(fileobj, key.fd))
-
- self._fd_to_key[key.fd] = key
- return key
-
- def unregister(self, fileobj):
- """ Unregister a file object from being monitored. """
- try:
- key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- # Getting the fileno of a closed socket on Windows errors with EBADF.
- except socket.error as e: # Platform-specific: Windows.
- if e.errno != errno.EBADF:
- raise
- else:
- for key in self._fd_to_key.values():
- if key.fileobj is fileobj:
- self._fd_to_key.pop(key.fd)
- break
- else:
- raise KeyError("{0!r} is not registered".format(fileobj))
- return key
-
- def modify(self, fileobj, events, data=None):
- """ Change a registered file object monitored events and data. """
- # NOTE: Some subclasses optimize this operation even further.
- try:
- key = self._fd_to_key[self._fileobj_lookup(fileobj)]
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- if events != key.events:
- self.unregister(fileobj)
- key = self.register(fileobj, events, data)
-
- elif data != key.data:
- # Use a shortcut to update the data.
- key = key._replace(data=data)
- self._fd_to_key[key.fd] = key
-
- return key
-
- def select(self, timeout=None):
- """ Perform the actual selection until some monitored file objects
- are ready or the timeout expires. """
- raise NotImplementedError()
-
- def close(self):
- """ Close the selector. This must be called to ensure that all
- underlying resources are freed. """
- self._fd_to_key.clear()
- self._map = None
-
- def get_key(self, fileobj):
- """ Return the key associated with a registered file object. """
- mapping = self.get_map()
- if mapping is None:
- raise RuntimeError("Selector is closed")
- try:
- return mapping[fileobj]
- except KeyError:
- raise KeyError("{0!r} is not registered".format(fileobj))
-
- def get_map(self):
- """ Return a mapping of file objects to selector keys """
- return self._map
-
- def _key_from_fd(self, fd):
- """ Return the key associated to a given file descriptor
- Return None if it is not found. """
- try:
- return self._fd_to_key[fd]
- except KeyError:
- return None
-
- def __enter__(self):
- return self
-
- def __exit__(self, *args):
- self.close()
-
-
-# Almost all platforms have select.select()
-if hasattr(select, "select"):
- class SelectSelector(BaseSelector):
- """ Select-based selector. """
- def __init__(self):
- super(SelectSelector, self).__init__()
- self._readers = set()
- self._writers = set()
-
- def register(self, fileobj, events, data=None):
- key = super(SelectSelector, self).register(fileobj, events, data)
- if events & EVENT_READ:
- self._readers.add(key.fd)
- if events & EVENT_WRITE:
- self._writers.add(key.fd)
- return key
-
- def unregister(self, fileobj):
- key = super(SelectSelector, self).unregister(fileobj)
- self._readers.discard(key.fd)
- self._writers.discard(key.fd)
- return key
-
- def _select(self, r, w, timeout=None):
- """ Wrapper for select.select because timeout is a positional arg """
- return select.select(r, w, [], timeout)
-
- def select(self, timeout=None):
- # Selecting on empty lists on Windows errors out.
- if not len(self._readers) and not len(self._writers):
- return []
-
- timeout = None if timeout is None else max(timeout, 0.0)
- ready = []
- r, w, _ = _syscall_wrapper(self._select, True, self._readers,
- self._writers, timeout)
- r = set(r)
- w = set(w)
- for fd in r | w:
- events = 0
- if fd in r:
- events |= EVENT_READ
- if fd in w:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
- return ready
-
-
-if hasattr(select, "poll"):
- class PollSelector(BaseSelector):
- """ Poll-based selector """
- def __init__(self):
- super(PollSelector, self).__init__()
- self._poll = select.poll()
-
- def register(self, fileobj, events, data=None):
- key = super(PollSelector, self).register(fileobj, events, data)
- event_mask = 0
- if events & EVENT_READ:
- event_mask |= select.POLLIN
- if events & EVENT_WRITE:
- event_mask |= select.POLLOUT
- self._poll.register(key.fd, event_mask)
- return key
-
- def unregister(self, fileobj):
- key = super(PollSelector, self).unregister(fileobj)
- self._poll.unregister(key.fd)
- return key
-
- def _wrap_poll(self, timeout=None):
- """ Wrapper function for select.poll.poll() so that
- _syscall_wrapper can work with only seconds. """
- if timeout is not None:
- if timeout <= 0:
- timeout = 0
- else:
- # select.poll.poll() has a resolution of 1 millisecond,
- # round away from zero to wait *at least* timeout seconds.
- timeout = math.ceil(timeout * 1e3)
-
- result = self._poll.poll(timeout)
- return result
-
- def select(self, timeout=None):
- ready = []
- fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
- for fd, event_mask in fd_events:
- events = 0
- if event_mask & ~select.POLLIN:
- events |= EVENT_WRITE
- if event_mask & ~select.POLLOUT:
- events |= EVENT_READ
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
-
- return ready
-
-
-if hasattr(select, "epoll"):
- class EpollSelector(BaseSelector):
- """ Epoll-based selector """
- def __init__(self):
- super(EpollSelector, self).__init__()
- self._epoll = select.epoll()
-
- def fileno(self):
- return self._epoll.fileno()
-
- def register(self, fileobj, events, data=None):
- key = super(EpollSelector, self).register(fileobj, events, data)
- events_mask = 0
- if events & EVENT_READ:
- events_mask |= select.EPOLLIN
- if events & EVENT_WRITE:
- events_mask |= select.EPOLLOUT
- _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
- return key
-
- def unregister(self, fileobj):
- key = super(EpollSelector, self).unregister(fileobj)
- try:
- _syscall_wrapper(self._epoll.unregister, False, key.fd)
- except SelectorError:
-                # This can occur when the fd was closed since registration.
- pass
- return key
-
- def select(self, timeout=None):
- if timeout is not None:
- if timeout <= 0:
- timeout = 0.0
- else:
- # select.epoll.poll() has a resolution of 1 millisecond
- # but luckily takes seconds so we don't need a wrapper
- # like PollSelector. Just for better rounding.
- timeout = math.ceil(timeout * 1e3) * 1e-3
- timeout = float(timeout)
- else:
- timeout = -1.0 # epoll.poll() must have a float.
-
- # We always want at least 1 to ensure that select can be called
-            # with no file descriptors registered. Otherwise it will fail.
- max_events = max(len(self._fd_to_key), 1)
-
- ready = []
- fd_events = _syscall_wrapper(self._epoll.poll, True,
- timeout=timeout,
- maxevents=max_events)
- for fd, event_mask in fd_events:
- events = 0
- if event_mask & ~select.EPOLLIN:
- events |= EVENT_WRITE
- if event_mask & ~select.EPOLLOUT:
- events |= EVENT_READ
-
- key = self._key_from_fd(fd)
- if key:
- ready.append((key, events & key.events))
- return ready
-
- def close(self):
- self._epoll.close()
- super(EpollSelector, self).close()
-
-
-if hasattr(select, "kqueue"):
- class KqueueSelector(BaseSelector):
- """ Kqueue / Kevent-based selector """
- def __init__(self):
- super(KqueueSelector, self).__init__()
- self._kqueue = select.kqueue()
-
- def fileno(self):
- return self._kqueue.fileno()
-
- def register(self, fileobj, events, data=None):
- key = super(KqueueSelector, self).register(fileobj, events, data)
- if events & EVENT_READ:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_READ,
- select.KQ_EV_ADD)
-
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
-
- if events & EVENT_WRITE:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_WRITE,
- select.KQ_EV_ADD)
-
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
-
- return key
-
- def unregister(self, fileobj):
- key = super(KqueueSelector, self).unregister(fileobj)
- if key.events & EVENT_READ:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_READ,
- select.KQ_EV_DELETE)
- try:
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
- except SelectorError:
- pass
- if key.events & EVENT_WRITE:
- kevent = select.kevent(key.fd,
- select.KQ_FILTER_WRITE,
- select.KQ_EV_DELETE)
- try:
- _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
- except SelectorError:
- pass
-
- return key
-
- def select(self, timeout=None):
- if timeout is not None:
- timeout = max(timeout, 0)
-
- max_events = len(self._fd_to_key) * 2
- ready_fds = {}
-
- kevent_list = _syscall_wrapper(self._kqueue.control, True,
- None, max_events, timeout)
-
- for kevent in kevent_list:
- fd = kevent.ident
- event_mask = kevent.filter
- events = 0
- if event_mask == select.KQ_FILTER_READ:
- events |= EVENT_READ
- if event_mask == select.KQ_FILTER_WRITE:
- events |= EVENT_WRITE
-
- key = self._key_from_fd(fd)
- if key:
- if key.fd not in ready_fds:
- ready_fds[key.fd] = (key, events & key.events)
- else:
- old_events = ready_fds[key.fd][1]
- ready_fds[key.fd] = (key, (events | old_events) & key.events)
-
- return list(ready_fds.values())
-
- def close(self):
- self._kqueue.close()
- super(KqueueSelector, self).close()
-
-
-if not hasattr(select, 'select'): # Platform-specific: AppEngine
- HAS_SELECT = False
-
-
-def _can_allocate(struct):
- """ Checks that select structs can be allocated by the underlying
- operating system, not just advertised by the select module. We don't
-    check select() because we are hopeful that most platforms that
-    don't have it available will not advertise it. (i.e. GAE) """
- try:
- # select.poll() objects won't fail until used.
- if struct == 'poll':
- p = select.poll()
- p.poll(0)
-
- # All others will fail on allocation.
- else:
- getattr(select, struct)().close()
- return True
- except (OSError, AttributeError) as e:
- return False
-
-
-# Choose the best implementation, roughly:
-# kqueue == epoll > poll > select. Devpoll not supported. (See above)
-# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
-def DefaultSelector():
-    """ On the first call, detect whether the select module has been
-    monkey-patched incorrectly (e.g. by eventlet or greenlet) and cache the
-    best available selector implementation, preserving proper behavior. """
- global _DEFAULT_SELECTOR
- if _DEFAULT_SELECTOR is None:
- if _can_allocate('kqueue'):
- _DEFAULT_SELECTOR = KqueueSelector
- elif _can_allocate('epoll'):
- _DEFAULT_SELECTOR = EpollSelector
- elif _can_allocate('poll'):
- _DEFAULT_SELECTOR = PollSelector
- elif hasattr(select, 'select'):
- _DEFAULT_SELECTOR = SelectSelector
- else: # Platform-specific: AppEngine
- raise ValueError('Platform does not have a selector')
- return _DEFAULT_SELECTOR()
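
For context on the selector API removed above, here is a minimal, hedged usage sketch; the import path and the socket setup are illustrative assumptions (the vendored package may not be importable as `urllib3.util.selectors` outside the plugin's environment):

```python
# Hedged sketch only: drive the backported selector with one listening socket.
import socket

from urllib3.util.selectors import DefaultSelector, EVENT_READ  # assumed import path

listener = socket.socket()
listener.bind(("127.0.0.1", 0))
listener.listen(5)
listener.setblocking(False)

selector = DefaultSelector()              # picks kqueue/epoll/poll/select
selector.register(listener, EVENT_READ, data="listener")

# select() returns (SelectorKey, events) pairs for ready file objects.
for key, events in selector.select(timeout=1.0):
    if events & EVENT_READ and key.data == "listener":
        conn, _addr = key.fileobj.accept()
        conn.close()

selector.close()
```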
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
deleted file mode 100644
index ece3ec39e..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/ssl_.py
+++ /dev/null
@@ -1,338 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-import errno
-import warnings
-import hmac
-
-from binascii import hexlify, unhexlify
-from hashlib import md5, sha1, sha256
-
-from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
-
-
-SSLContext = None
-HAS_SNI = False
-IS_PYOPENSSL = False
-IS_SECURETRANSPORT = False
-
-# Maps the length of a digest to a possible hash function producing this digest
-HASHFUNC_MAP = {
- 32: md5,
- 40: sha1,
- 64: sha256,
-}
-
-
-def _const_compare_digest_backport(a, b):
- """
- Compare two digests of equal length in constant time.
-
- The digests must be of type str/bytes.
- Returns True if the digests match, and False otherwise.
- """
- result = abs(len(a) - len(b))
- for l, r in zip(bytearray(a), bytearray(b)):
- result |= l ^ r
- return result == 0
-
-
-_const_compare_digest = getattr(hmac, 'compare_digest',
- _const_compare_digest_backport)
-
-
-try: # Test for SSL features
- import ssl
- from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
- from ssl import HAS_SNI # Has SNI?
-except ImportError:
- pass
-
-
-try:
- from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
-except ImportError:
- OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
- OP_NO_COMPRESSION = 0x20000
-
-# A secure default.
-# Sources for more information on TLS ciphers:
-#
-# - https://wiki.mozilla.org/Security/Server_Side_TLS
-# - https://www.ssllabs.com/projects/best-practices/index.html
-# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
-#
-# The general intent is:
-# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
-# - prefer ECDHE over DHE for better performance,
-# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
-# security,
-# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
-# - disable NULL authentication, MD5 MACs and DSS for security reasons.
-DEFAULT_CIPHERS = ':'.join([
- 'ECDH+AESGCM',
- 'ECDH+CHACHA20',
- 'DH+AESGCM',
- 'DH+CHACHA20',
- 'ECDH+AES256',
- 'DH+AES256',
- 'ECDH+AES128',
- 'DH+AES',
- 'RSA+AESGCM',
- 'RSA+AES',
- '!aNULL',
- '!eNULL',
- '!MD5',
-])
-
-try:
- from ssl import SSLContext # Modern SSL?
-except ImportError:
- import sys
-
- class SSLContext(object): # Platform-specific: Python 2 & 3.1
- supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
- (3, 2) <= sys.version_info)
-
- def __init__(self, protocol_version):
- self.protocol = protocol_version
- # Use default values from a real SSLContext
- self.check_hostname = False
- self.verify_mode = ssl.CERT_NONE
- self.ca_certs = None
- self.options = 0
- self.certfile = None
- self.keyfile = None
- self.ciphers = None
-
- def load_cert_chain(self, certfile, keyfile):
- self.certfile = certfile
- self.keyfile = keyfile
-
- def load_verify_locations(self, cafile=None, capath=None):
- self.ca_certs = cafile
-
- if capath is not None:
- raise SSLError("CA directories not supported in older Pythons")
-
- def set_ciphers(self, cipher_suite):
- if not self.supports_set_ciphers:
- raise TypeError(
- 'Your version of Python does not support setting '
- 'a custom cipher suite. Please upgrade to Python '
- '2.7, 3.2, or later if you need this functionality.'
- )
- self.ciphers = cipher_suite
-
- def wrap_socket(self, socket, server_hostname=None, server_side=False):
- warnings.warn(
- 'A true SSLContext object is not available. This prevents '
- 'urllib3 from configuring SSL appropriately and may cause '
- 'certain SSL connections to fail. You can upgrade to a newer '
- 'version of Python to solve this. For more information, see '
- 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
- '#ssl-warnings',
- InsecurePlatformWarning
- )
- kwargs = {
- 'keyfile': self.keyfile,
- 'certfile': self.certfile,
- 'ca_certs': self.ca_certs,
- 'cert_reqs': self.verify_mode,
- 'ssl_version': self.protocol,
- 'server_side': server_side,
- }
- if self.supports_set_ciphers: # Platform-specific: Python 2.7+
- return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
- else: # Platform-specific: Python 2.6
- return wrap_socket(socket, **kwargs)
-
-
-def assert_fingerprint(cert, fingerprint):
- """
- Checks if given fingerprint matches the supplied certificate.
-
- :param cert:
- Certificate as bytes object.
- :param fingerprint:
- Fingerprint as string of hexdigits, can be interspersed by colons.
- """
-
- fingerprint = fingerprint.replace(':', '').lower()
- digest_length = len(fingerprint)
- hashfunc = HASHFUNC_MAP.get(digest_length)
- if not hashfunc:
- raise SSLError(
- 'Fingerprint of invalid length: {0}'.format(fingerprint))
-
-    # We need encode() here for py32; works on py2 and py33.
- fingerprint_bytes = unhexlify(fingerprint.encode())
-
- cert_digest = hashfunc(cert).digest()
-
- if not _const_compare_digest(cert_digest, fingerprint_bytes):
- raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
- .format(fingerprint, hexlify(cert_digest)))
-
-
-def resolve_cert_reqs(candidate):
- """
- Resolves the argument to a numeric constant, which can be passed to
- the wrap_socket function/method from the ssl module.
- Defaults to :data:`ssl.CERT_NONE`.
- If given a string it is assumed to be the name of the constant in the
-    :mod:`ssl` module or its abbreviation.
-    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
- If it's neither `None` nor a string we assume it is already the numeric
- constant which can directly be passed to wrap_socket.
- """
- if candidate is None:
- return CERT_NONE
-
- if isinstance(candidate, str):
- res = getattr(ssl, candidate, None)
- if res is None:
- res = getattr(ssl, 'CERT_' + candidate)
- return res
-
- return candidate
-
-
-def resolve_ssl_version(candidate):
- """
-    Like resolve_cert_reqs(), but for the ssl_version argument.
- """
- if candidate is None:
- return PROTOCOL_SSLv23
-
- if isinstance(candidate, str):
- res = getattr(ssl, candidate, None)
- if res is None:
- res = getattr(ssl, 'PROTOCOL_' + candidate)
- return res
-
- return candidate
-
-
-def create_urllib3_context(ssl_version=None, cert_reqs=None,
- options=None, ciphers=None):
- """All arguments have the same meaning as ``ssl_wrap_socket``.
-
- By default, this function does a lot of the same work that
- ``ssl.create_default_context`` does on Python 3.4+. It:
-
- - Disables SSLv2, SSLv3, and compression
- - Sets a restricted set of server ciphers
-
- If you wish to enable SSLv3, you can do::
-
- from urllib3.util import ssl_
- context = ssl_.create_urllib3_context()
- context.options &= ~ssl_.OP_NO_SSLv3
-
- You can do the same to enable compression (substituting ``COMPRESSION``
- for ``SSLv3`` in the last line above).
-
- :param ssl_version:
- The desired protocol version to use. This will default to
- PROTOCOL_SSLv23 which will negotiate the highest protocol that both
- the server and your installation of OpenSSL support.
- :param cert_reqs:
- Whether to require the certificate verification. This defaults to
- ``ssl.CERT_REQUIRED``.
- :param options:
- Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
- ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
- :param ciphers:
- Which cipher suites to allow the server to select.
- :returns:
- Constructed SSLContext object with specified options
- :rtype: SSLContext
- """
- context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)
-
- # Setting the default here, as we may have no ssl module on import
- cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
-
- if options is None:
- options = 0
- # SSLv2 is easily broken and is considered harmful and dangerous
- options |= OP_NO_SSLv2
- # SSLv3 has several problems and is now dangerous
- options |= OP_NO_SSLv3
- # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
- # (issue #309)
- options |= OP_NO_COMPRESSION
-
- context.options |= options
-
- if getattr(context, 'supports_set_ciphers', True): # Platform-specific: Python 2.6
- context.set_ciphers(ciphers or DEFAULT_CIPHERS)
-
- context.verify_mode = cert_reqs
- if getattr(context, 'check_hostname', None) is not None: # Platform-specific: Python 3.2
- # We do our own verification, including fingerprints and alternative
- # hostnames. So disable it here
- context.check_hostname = False
- return context
-
-
-def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
- ca_certs=None, server_hostname=None,
- ssl_version=None, ciphers=None, ssl_context=None,
- ca_cert_dir=None):
- """
- All arguments except for server_hostname, ssl_context, and ca_cert_dir have
- the same meaning as they do when using :func:`ssl.wrap_socket`.
-
- :param server_hostname:
- When SNI is supported, the expected hostname of the certificate
- :param ssl_context:
- A pre-made :class:`SSLContext` object. If none is provided, one will
- be created using :func:`create_urllib3_context`.
- :param ciphers:
- A string of ciphers we wish the client to support. This is not
- supported on Python 2.6 as the ssl module does not support it.
- :param ca_cert_dir:
- A directory containing CA certificates in multiple separate files, as
- supported by OpenSSL's -CApath flag or the capath argument to
- SSLContext.load_verify_locations().
- """
- context = ssl_context
- if context is None:
- # Note: This branch of code and all the variables in it are no longer
- # used by urllib3 itself. We should consider deprecating and removing
- # this code.
- context = create_urllib3_context(ssl_version, cert_reqs,
- ciphers=ciphers)
-
- if ca_certs or ca_cert_dir:
- try:
- context.load_verify_locations(ca_certs, ca_cert_dir)
- except IOError as e: # Platform-specific: Python 2.6, 2.7, 3.2
- raise SSLError(e)
- # Py33 raises FileNotFoundError which subclasses OSError
- # These are not equivalent unless we check the errno attribute
- except OSError as e: # Platform-specific: Python 3.3 and beyond
- if e.errno == errno.ENOENT:
- raise SSLError(e)
- raise
- elif getattr(context, 'load_default_certs', None) is not None:
-        # try to load OS default certs; works well on Windows (requires Python 3.4+)
- context.load_default_certs()
-
- if certfile:
- context.load_cert_chain(certfile, keyfile)
- if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
- return context.wrap_socket(sock, server_hostname=server_hostname)
-
- warnings.warn(
- 'An HTTPS request has been made, but the SNI (Subject Name '
- 'Indication) extension to TLS is not available on this platform. '
- 'This may cause the server to present an incorrect TLS '
- 'certificate, which can cause validation failures. You can upgrade to '
- 'a newer version of Python to solve this. For more information, see '
- 'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
- '#ssl-warnings',
- SNIMissingWarning
- )
- return context.wrap_socket(sock)
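
As a reference for the module deleted above, a hedged sketch of the typical call sequence; the hostname and the import path are placeholders/assumptions:

```python
# Hedged sketch: create a hardened context, then wrap an existing TCP socket.
import socket

from urllib3.util.ssl_ import create_urllib3_context, ssl_wrap_socket  # assumed import path

# SSLv2/SSLv3/compression are disabled and the restricted DEFAULT_CIPHERS applied.
context = create_urllib3_context()

sock = socket.create_connection(("example.com", 443))
tls_sock = ssl_wrap_socket(sock, ssl_context=context,
                           server_hostname="example.com")
tls_sock.close()
```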
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
deleted file mode 100644
index 4041cf9b9..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/timeout.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-# The default socket timeout, used by httplib to indicate that no timeout was
-# specified by the user
-from socket import _GLOBAL_DEFAULT_TIMEOUT
-import time
-
-from ..exceptions import TimeoutStateError
-
-# A sentinel value to indicate that no timeout was specified by the user in
-# urllib3
-_Default = object()
-
-
-# Use time.monotonic if available.
-current_time = getattr(time, "monotonic", time.time)
-
-
-class Timeout(object):
- """ Timeout configuration.
-
- Timeouts can be defined as a default for a pool::
-
- timeout = Timeout(connect=2.0, read=7.0)
- http = PoolManager(timeout=timeout)
- response = http.request('GET', 'http://example.com/')
-
- Or per-request (which overrides the default for the pool)::
-
- response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
-
- Timeouts can be disabled by setting all the parameters to ``None``::
-
- no_timeout = Timeout(connect=None, read=None)
-        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
-
-
- :param total:
- This combines the connect and read timeouts into one; the read timeout
- will be set to the time leftover from the connect attempt. In the
- event that both a connect timeout and a total are specified, or a read
- timeout and a total are specified, the shorter timeout will be applied.
-
- Defaults to None.
-
- :type total: integer, float, or None
-
- :param connect:
- The maximum amount of time to wait for a connection attempt to a server
- to succeed. Omitting the parameter will default the connect timeout to
- the system default, probably `the global default timeout in socket.py
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
- None will set an infinite timeout for connection attempts.
-
- :type connect: integer, float, or None
-
- :param read:
- The maximum amount of time to wait between consecutive
- read operations for a response from the server. Omitting
- the parameter will default the read timeout to the system
- default, probably `the global default timeout in socket.py
- <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
- None will set an infinite timeout.
-
- :type read: integer, float, or None
-
- .. note::
-
- Many factors can affect the total amount of time for urllib3 to return
- an HTTP response.
-
- For example, Python's DNS resolver does not obey the timeout specified
- on the socket. Other factors that can affect total request time include
- high CPU load, high swap, the program running at a low priority level,
- or other behaviors.
-
- In addition, the read and total timeouts only measure the time between
- read operations on the socket connecting the client and the server,
- not the total amount of time for the request to return a complete
- response. For most requests, the timeout is raised because the server
- has not sent the first byte in the specified time. This is not always
- the case; if a server streams one byte every fifteen seconds, a timeout
- of 20 seconds will not trigger, even though the request will take
- several minutes to complete.
-
- If your goal is to cut off any request after a set amount of wall clock
- time, consider having a second "watcher" thread to cut off a slow
- request.
- """
-
- #: A sentinel object representing the default timeout value
- DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
-
- def __init__(self, total=None, connect=_Default, read=_Default):
- self._connect = self._validate_timeout(connect, 'connect')
- self._read = self._validate_timeout(read, 'read')
- self.total = self._validate_timeout(total, 'total')
- self._start_connect = None
-
- def __str__(self):
- return '%s(connect=%r, read=%r, total=%r)' % (
- type(self).__name__, self._connect, self._read, self.total)
-
- @classmethod
- def _validate_timeout(cls, value, name):
- """ Check that a timeout attribute is valid.
-
- :param value: The timeout value to validate
- :param name: The name of the timeout attribute to validate. This is
- used to specify in error messages.
- :return: The validated and casted version of the given value.
- :raises ValueError: If it is a numeric value less than or equal to
- zero, or the type is not an integer, float, or None.
- """
- if value is _Default:
- return cls.DEFAULT_TIMEOUT
-
- if value is None or value is cls.DEFAULT_TIMEOUT:
- return value
-
- if isinstance(value, bool):
- raise ValueError("Timeout cannot be a boolean value. It must "
- "be an int, float or None.")
- try:
- float(value)
- except (TypeError, ValueError):
- raise ValueError("Timeout value %s was %s, but it must be an "
- "int, float or None." % (name, value))
-
- try:
- if value <= 0:
- raise ValueError("Attempted to set %s timeout to %s, but the "
- "timeout cannot be set to a value less "
- "than or equal to 0." % (name, value))
- except TypeError: # Python 3
- raise ValueError("Timeout value %s was %s, but it must be an "
- "int, float or None." % (name, value))
-
- return value
-
- @classmethod
- def from_float(cls, timeout):
- """ Create a new Timeout from a legacy timeout value.
-
- The timeout value used by httplib.py sets the same timeout on the
- connect(), and recv() socket requests. This creates a :class:`Timeout`
- object that sets the individual timeouts to the ``timeout`` value
- passed to this function.
-
- :param timeout: The legacy timeout value.
- :type timeout: integer, float, sentinel default object, or None
- :return: Timeout object
- :rtype: :class:`Timeout`
- """
- return Timeout(read=timeout, connect=timeout)
-
- def clone(self):
- """ Create a copy of the timeout object
-
- Timeout properties are stored per-pool but each request needs a fresh
- Timeout object to ensure each one has its own start/stop configured.
-
- :return: a copy of the timeout object
- :rtype: :class:`Timeout`
- """
- # We can't use copy.deepcopy because that will also create a new object
- # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
- # detect the user default.
- return Timeout(connect=self._connect, read=self._read,
- total=self.total)
-
- def start_connect(self):
- """ Start the timeout clock, used during a connect() attempt
-
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
- to start a timer that has been started already.
- """
- if self._start_connect is not None:
- raise TimeoutStateError("Timeout timer has already been started.")
- self._start_connect = current_time()
- return self._start_connect
-
- def get_connect_duration(self):
- """ Gets the time elapsed since the call to :meth:`start_connect`.
-
- :return: Elapsed time.
- :rtype: float
- :raises urllib3.exceptions.TimeoutStateError: if you attempt
- to get duration for a timer that hasn't been started.
- """
- if self._start_connect is None:
- raise TimeoutStateError("Can't get connect duration for timer "
- "that has not started.")
- return current_time() - self._start_connect
-
- @property
- def connect_timeout(self):
- """ Get the value to use when setting a connection timeout.
-
- This will be a positive float or integer, the value None
- (never timeout), or the default system timeout.
-
- :return: Connect timeout.
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
- """
- if self.total is None:
- return self._connect
-
- if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
- return self.total
-
- return min(self._connect, self.total)
-
- @property
- def read_timeout(self):
- """ Get the value for the read timeout.
-
- This assumes some time has elapsed in the connection timeout and
- computes the read timeout appropriately.
-
- If self.total is set, the read timeout is dependent on the amount of
- time taken by the connect timeout. If the connection time has not been
- established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
- raised.
-
- :return: Value to use for the read timeout.
- :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
- :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
- has not yet been called on this object.
- """
- if (self.total is not None and
- self.total is not self.DEFAULT_TIMEOUT and
- self._read is not None and
- self._read is not self.DEFAULT_TIMEOUT):
- # In case the connect timeout has not yet been established.
- if self._start_connect is None:
- return self._read
- return max(0, min(self.total - self.get_connect_duration(),
- self._read))
- elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
- return max(0, self.total - self.get_connect_duration())
- else:
- return self._read
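
A hedged sketch of the per-request lifecycle this class implements; the values and import path are illustrative assumptions:

```python
# Hedged sketch: one Timeout per pool, a clone per request so the connect
# clock (start_connect) is fresh for every attempt.
from urllib3.util.timeout import Timeout  # assumed import path

pool_timeout = Timeout(connect=2.0, read=7.0, total=10.0)

request_timeout = pool_timeout.clone()
request_timeout.start_connect()                      # start the connect clock
connect_budget = request_timeout.connect_timeout     # min(connect, total) == 2.0
# Once the connection is established, the remaining read budget shrinks by
# however long the connect phase took, capped by the read timeout.
read_budget = request_timeout.read_timeout
```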
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/url.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/url.py
deleted file mode 100644
index 99fd6534a..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/url.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# SPDX-License-Identifier: MIT
-from __future__ import absolute_import
-from collections import namedtuple
-
-from ..exceptions import LocationParseError
-
-
-url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
-
-# We only want to normalize urls with an HTTP(S) scheme.
-# urllib3 infers URLs without a scheme (None) to be http.
-NORMALIZABLE_SCHEMES = ('http', 'https', None)
-
-
-class Url(namedtuple('Url', url_attrs)):
- """
- Datastructure for representing an HTTP URL. Used as a return value for
- :func:`parse_url`. Both the scheme and host are normalized as they are
- both case-insensitive according to RFC 3986.
- """
- __slots__ = ()
-
- def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
- query=None, fragment=None):
- if path and not path.startswith('/'):
- path = '/' + path
- if scheme:
- scheme = scheme.lower()
- if host and scheme in NORMALIZABLE_SCHEMES:
- host = host.lower()
- return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
- query, fragment)
-
- @property
- def hostname(self):
- """For backwards-compatibility with urlparse. We're nice like that."""
- return self.host
-
- @property
- def request_uri(self):
- """Absolute path including the query string."""
- uri = self.path or '/'
-
- if self.query is not None:
- uri += '?' + self.query
-
- return uri
-
- @property
- def netloc(self):
- """Network location including host and port"""
- if self.port:
- return '%s:%d' % (self.host, self.port)
- return self.host
-
- @property
- def url(self):
- """
- Convert self into a url
-
- This function should more or less round-trip with :func:`.parse_url`. The
- returned url may not be exactly the same as the url inputted to
- :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
- with a blank port will have : removed).
-
- Example: ::
-
- >>> U = parse_url('http://google.com/mail/')
- >>> U.url
- 'http://google.com/mail/'
- >>> Url('http', 'username:password', 'host.com', 80,
- ... '/path', 'query', 'fragment').url
- 'http://username:password@host.com:80/path?query#fragment'
- """
- scheme, auth, host, port, path, query, fragment = self
- url = ''
-
-        # We use "is not None" because we want things to happen with empty strings (or 0 port)
- if scheme is not None:
- url += scheme + '://'
- if auth is not None:
- url += auth + '@'
- if host is not None:
- url += host
- if port is not None:
- url += ':' + str(port)
- if path is not None:
- url += path
- if query is not None:
- url += '?' + query
- if fragment is not None:
- url += '#' + fragment
-
- return url
-
- def __str__(self):
- return self.url
-
-
-def split_first(s, delims):
- """
- Given a string and an iterable of delimiters, split on the first found
- delimiter. Return two split parts and the matched delimiter.
-
- If not found, then the first part is the full input string.
-
- Example::
-
- >>> split_first('foo/bar?baz', '?/=')
- ('foo', 'bar?baz', '/')
- >>> split_first('foo/bar?baz', '123')
- ('foo/bar?baz', '', None)
-
- Scales linearly with number of delims. Not ideal for large number of delims.
- """
- min_idx = None
- min_delim = None
- for d in delims:
- idx = s.find(d)
- if idx < 0:
- continue
-
- if min_idx is None or idx < min_idx:
- min_idx = idx
- min_delim = d
-
- if min_idx is None or min_idx < 0:
- return s, '', None
-
- return s[:min_idx], s[min_idx + 1:], min_delim
-
-
-def parse_url(url):
- """
- Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
- performed to parse incomplete urls. Fields not provided will be None.
-
- Partly backwards-compatible with :mod:`urlparse`.
-
- Example::
-
- >>> parse_url('http://google.com/mail/')
- Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
- >>> parse_url('google.com:80')
- Url(scheme=None, host='google.com', port=80, path=None, ...)
- >>> parse_url('/foo?bar')
- Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
- """
-
- # While this code has overlap with stdlib's urlparse, it is much
- # simplified for our needs and less annoying.
-    # Additionally, this implementation does silly things to be optimal
- # on CPython.
-
- if not url:
- # Empty
- return Url()
-
- scheme = None
- auth = None
- host = None
- port = None
- path = None
- fragment = None
- query = None
-
- # Scheme
- if '://' in url:
- scheme, url = url.split('://', 1)
-
- # Find the earliest Authority Terminator
- # (http://tools.ietf.org/html/rfc3986#section-3.2)
- url, path_, delim = split_first(url, ['/', '?', '#'])
-
- if delim:
- # Reassemble the path
- path = delim + path_
-
- # Auth
- if '@' in url:
- # Last '@' denotes end of auth part
- auth, url = url.rsplit('@', 1)
-
- # IPv6
- if url and url[0] == '[':
- host, url = url.split(']', 1)
- host += ']'
-
- # Port
- if ':' in url:
- _host, port = url.split(':', 1)
-
- if not host:
- host = _host
-
- if port:
- # If given, ports must be integers. No whitespace, no plus or
- # minus prefixes, no non-integer digits such as ^2 (superscript).
- if not port.isdigit():
- raise LocationParseError(url)
- try:
- port = int(port)
- except ValueError:
- raise LocationParseError(url)
- else:
- # Blank ports are cool, too. (rfc3986#section-3.2.3)
- port = None
-
- elif not host and url:
- host = url
-
- if not path:
- return Url(scheme, auth, host, port, path, query, fragment)
-
- # Fragment
- if '#' in path:
- path, fragment = path.split('#', 1)
-
- # Query
- if '?' in path:
- path, query = path.split('?', 1)
-
- return Url(scheme, auth, host, port, path, query, fragment)
-
-
-def get_host(url):
- """
- Deprecated. Use :func:`parse_url` instead.
- """
- p = parse_url(url)
- return p.scheme or 'http', p.hostname, p.port
diff --git a/src/collectors/python.d.plugin/python_modules/urllib3/util/wait.py b/src/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
deleted file mode 100644
index 21e72979c..000000000
--- a/src/collectors/python.d.plugin/python_modules/urllib3/util/wait.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# SPDX-License-Identifier: MIT
-from .selectors import (
- HAS_SELECT,
- DefaultSelector,
- EVENT_READ,
- EVENT_WRITE
-)
-
-
-def _wait_for_io_events(socks, events, timeout=None):
- """ Waits for IO events to be available from a list of sockets
- or optionally a single socket if passed in. Returns a list of
- sockets that can be interacted with immediately. """
- if not HAS_SELECT:
- raise ValueError('Platform does not have a selector')
- if not isinstance(socks, list):
- # Probably just a single socket.
- if hasattr(socks, "fileno"):
- socks = [socks]
- # Otherwise it might be a non-list iterable.
- else:
- socks = list(socks)
- with DefaultSelector() as selector:
- for sock in socks:
- selector.register(sock, events)
- return [key[0].fileobj for key in
- selector.select(timeout) if key[1] & events]
-
-
-def wait_for_read(socks, timeout=None):
- """ Waits for reading to be available from a list of sockets
- or optionally a single socket if passed in. Returns a list of
- sockets that can be read from immediately. """
- return _wait_for_io_events(socks, EVENT_READ, timeout)
-
-
-def wait_for_write(socks, timeout=None):
- """ Waits for writing to be available from a list of sockets
- or optionally a single socket if passed in. Returns a list of
- sockets that can be written to immediately. """
- return _wait_for_io_events(socks, EVENT_WRITE, timeout)
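
A hedged usage sketch of these helpers; the host, request, and import path are placeholders/assumptions:

```python
# Hedged sketch: wait up to one second for a response to become readable.
import socket

from urllib3.util.wait import wait_for_read  # assumed import path

sock = socket.create_connection(("example.com", 80))
sock.sendall(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")

readable = wait_for_read([sock], timeout=1.0)  # sockets ready for reading
if readable:
    print(readable[0].recv(4096))
sock.close()
```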
diff --git a/src/collectors/python.d.plugin/samba/README.md b/src/collectors/python.d.plugin/samba/README.md
deleted file mode 120000
index 3b63bbab6..000000000
--- a/src/collectors/python.d.plugin/samba/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/samba.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/samba/integrations/samba.md b/src/collectors/python.d.plugin/samba/integrations/samba.md
deleted file mode 100644
index 4d6f8fcc3..000000000
--- a/src/collectors/python.d.plugin/samba/integrations/samba.md
+++ /dev/null
@@ -1,255 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/samba/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/samba/metadata.yaml"
-sidebar_label: "Samba"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Samba
-
-
-<img src="https://netdata.cloud/img/samba.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: samba
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors the performance metrics of Samba file sharing.
-
-It is using the `smbstatus` command-line tool.
-
-Executed commands:
-
-- `sudo -n smbstatus -P`
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-`smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-After all the permissions are satisfied, the `smbstatus -P` binary is executed.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Samba instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| syscall.rw | sendfile, recvfile | KiB/s |
-| smb2.rw | readout, writein, readin, writeout | KiB/s |
-| smb2.create_close | create, close | operations/s |
-| smb2.get_set_info | getinfo, setinfo | operations/s |
-| smb2.find | find | operations/s |
-| smb2.notify | notify | operations/s |
-| smb2.sm_counters | tcon, negprot, tdis, cancel, logoff, flush, lock, keepalive, break, sessetup | count |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Enable the samba collector
-
-The `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d.conf
-```
-Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
-
-
-#### Permissions and programs
-
-To run the collector you need:
-
-- `smbstatus` program
-- `sudo` program
-- `smbd` must be compiled with profiling enabled
-- `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
-
-The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
-
-- add to your `/etc/sudoers` file:
-
- `which smbstatus` shows the full path to the binary.
-
- ```bash
- netdata ALL=(root) NOPASSWD: /path/to/smbstatus
- ```
-
-- Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)
-
- The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`.
-
-
- As the `root` user, do the following:
-
- ```cmd
- mkdir /etc/systemd/system/netdata.service.d
- echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
- systemctl daemon-reload
- systemctl restart netdata.service
- ```
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/samba.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/samba.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-<details open><summary>Config</summary>
-
-```yaml
-my_job_name:
- name: my_name
- update_every: 1
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin samba debug trace
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `samba` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep samba
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
-
-```bash
-grep samba /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep samba
-```
-
-
diff --git a/src/collectors/python.d.plugin/samba/metadata.yaml b/src/collectors/python.d.plugin/samba/metadata.yaml
deleted file mode 100644
index 09c04e7d4..000000000
--- a/src/collectors/python.d.plugin/samba/metadata.yaml
+++ /dev/null
@@ -1,205 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: samba
- monitored_instance:
- name: Samba
- link: https://www.samba.org/samba/
- categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: "samba.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - samba
- - file sharing
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors the performance metrics of Samba file sharing."
- method_description: |
- It is using the `smbstatus` command-line tool.
-
- Executed commands:
-
- - `sudo -n smbstatus -P`
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: |
- `smbstatus` is used, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
- default_behavior:
- auto_detection:
- description: "After all the permissions are satisfied, the `smbstatus -P` binary is executed."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Enable the samba collector
- description: |
- The `samba` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
-
- ```bash
- cd /etc/netdata # Replace this path with your Netdata config directory, if different
- sudo ./edit-config python.d.conf
- ```
- Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate method](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for your system.
- - title: Permissions and programs
- description: |
- To run the collector you need:
-
- - `smbstatus` program
- - `sudo` program
- - `smbd` must be compiled with profiling enabled
- - `smbd` must be started either with the `-P 1` option or inside `smb.conf` using `smbd profiling level`
-
- The module uses `smbstatus`, which can only be executed by `root`. It uses `sudo` and assumes that it is configured such that the `netdata` user can execute `smbstatus` as root without a password.
-
- - add to your `/etc/sudoers` file:
-
- `which smbstatus` shows the full path to the binary.
-
- ```bash
- netdata ALL=(root) NOPASSWD: /path/to/smbstatus
- ```
-
- - Reset Netdata's systemd unit [CapabilityBoundingSet](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Capabilities) (Linux distributions with systemd)
-
- The default CapabilityBoundingSet doesn't allow using `sudo`, and is quite strict in general. Resetting is not optimal, but a next-best solution given the inability to execute `smbstatus` using `sudo`.
-
-
- As the `root` user, do the following:
-
- ```cmd
- mkdir /etc/systemd/system/netdata.service.d
- echo -e '[Service]\nCapabilityBoundingSet=~' | tee /etc/systemd/system/netdata.service.d/unset-capability-bounding-set.conf
- systemctl daemon-reload
- systemctl restart netdata.service
- ```
- configuration:
- file:
- name: python.d/samba.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic example configuration.
- config: |
- my_job_name:
- name: my_name
- update_every: 1
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: syscall.rw
- description: R/Ws
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: sendfile
- - name: recvfile
- - name: smb2.rw
- description: R/Ws
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: readout
- - name: writein
- - name: readin
- - name: writeout
- - name: smb2.create_close
- description: Create/Close
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: create
- - name: close
- - name: smb2.get_set_info
- description: Info
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: getinfo
- - name: setinfo
- - name: smb2.find
- description: Find
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: find
- - name: smb2.notify
- description: Notify
- unit: "operations/s"
- chart_type: line
- dimensions:
- - name: notify
- - name: smb2.sm_counters
- description: Lesser Ops
- unit: "count"
- chart_type: stacked
- dimensions:
- - name: tcon
- - name: negprot
- - name: tdis
- - name: cancel
- - name: logoff
- - name: flush
- - name: lock
- - name: keepalive
- - name: break
- - name: sessetup
diff --git a/src/collectors/python.d.plugin/samba/samba.chart.py b/src/collectors/python.d.plugin/samba/samba.chart.py
deleted file mode 100644
index 8eebcd60c..000000000
--- a/src/collectors/python.d.plugin/samba/samba.chart.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: samba netdata python.d module
-# Author: Christopher Cox <chris_cox@endlessnow.com>
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# The netdata user needs to be able to sudo the smbstatus program
-# without password:
-# netdata ALL=(ALL) NOPASSWD: /usr/bin/smbstatus -P
-#
-# This makes calls to smbstatus -P
-#
-# This just looks at a couple of values out of syscall, and some from smb2.
-#
-# The Lesser Ops chart is merely a display of current counter values. They
-# didn't seem to change much to me. However, if you notice something changing
-# a lot there, bring one or more out into its own chart and make it incremental
-# (like find and notify... good examples).
-
-import re
-import os
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-from bases.collection import find_binary
-
-disabled_by_default = True
-
-update_every = 5
-
-ORDER = [
- 'syscall_rw',
- 'smb2_rw',
- 'smb2_create_close',
- 'smb2_info',
- 'smb2_find',
- 'smb2_notify',
- 'smb2_sm_count'
-]
-
-CHARTS = {
- 'syscall_rw': {
- 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'],
- 'lines': [
- ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
- ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
- ]
- },
- 'smb2_rw': {
- 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'],
- 'lines': [
- ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
- ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],
- ['smb2_read_inbytes', 'readin', 'incremental', 1, 1024],
- ['smb2_write_outbytes', 'writeout', 'incremental', -1, 1024]
- ]
- },
- 'smb2_create_close': {
- 'options': [None, 'Create/Close', 'operations/s', 'smb2', 'smb2.create_close', 'line'],
- 'lines': [
- ['smb2_create_count', 'create', 'incremental', 1, 1],
- ['smb2_close_count', 'close', 'incremental', -1, 1]
- ]
- },
- 'smb2_info': {
- 'options': [None, 'Info', 'operations/s', 'smb2', 'smb2.get_set_info', 'line'],
- 'lines': [
- ['smb2_getinfo_count', 'getinfo', 'incremental', 1, 1],
- ['smb2_setinfo_count', 'setinfo', 'incremental', -1, 1]
- ]
- },
- 'smb2_find': {
- 'options': [None, 'Find', 'operations/s', 'smb2', 'smb2.find', 'line'],
- 'lines': [
- ['smb2_find_count', 'find', 'incremental', 1, 1]
- ]
- },
- 'smb2_notify': {
- 'options': [None, 'Notify', 'operations/s', 'smb2', 'smb2.notify', 'line'],
- 'lines': [
- ['smb2_notify_count', 'notify', 'incremental', 1, 1]
- ]
- },
- 'smb2_sm_count': {
- 'options': [None, 'Lesser Ops', 'count', 'smb2', 'smb2.sm_counters', 'stacked'],
- 'lines': [
- ['smb2_tcon_count', 'tcon', 'absolute', 1, 1],
- ['smb2_negprot_count', 'negprot', 'absolute', 1, 1],
- ['smb2_tdis_count', 'tdis', 'absolute', 1, 1],
- ['smb2_cancel_count', 'cancel', 'absolute', 1, 1],
- ['smb2_logoff_count', 'logoff', 'absolute', 1, 1],
- ['smb2_flush_count', 'flush', 'absolute', 1, 1],
- ['smb2_lock_count', 'lock', 'absolute', 1, 1],
- ['smb2_keepalive_count', 'keepalive', 'absolute', 1, 1],
- ['smb2_break_count', 'break', 'absolute', 1, 1],
- ['smb2_sessetup_count', 'sessetup', 'absolute', 1, 1]
- ]
- }
-}
-
-SUDO = 'sudo'
-SMBSTATUS = 'smbstatus'
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')
-
- def check(self):
- smbstatus_binary = find_binary(SMBSTATUS)
- if not smbstatus_binary:
- self.error("can't locate '{0}' binary".format(SMBSTATUS))
- return False
-
- if os.getuid() == 0:
- self.command = ' '.join([smbstatus_binary, '-P'])
- return ExecutableService.check(self)
-
- sudo_binary = find_binary(SUDO)
- if not sudo_binary:
- self.error("can't locate '{0}' binary".format(SUDO))
- return False
- command = [sudo_binary, '-n', '-l', smbstatus_binary, '-P']
- smbstatus = '{0} -P'.format(smbstatus_binary)
- allowed = self._get_raw_data(command=command)
- if not (allowed and allowed[0].strip() == smbstatus):
- self.error("not allowed to run sudo for command '{0}'".format(smbstatus))
- return False
- self.command = ' '.join([sudo_binary, '-n', smbstatus_binary, '-P'])
- return ExecutableService.check(self)
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- parsed = self.rgx_smb2.findall(' '.join(raw_data))
-
- return dict(parsed) or None
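
To illustrate the parsing step removed above, a hedged example of the `name: value` counter lines the regex expects; the sample values are made up, only the shape matters:

```python
# Hedged illustration of Service._get_data(): extract counter name/value pairs.
import re

rgx_smb2 = re.compile(r'(smb2_[^:]+|syscall_.*file_bytes):\s+(\d+)')

sample_output = """
syscall_sendfile_bytes:    1048576
syscall_recvfile_bytes:    0
smb2_read_outbytes:        524288
smb2_create_count:         42
"""

print(dict(rgx_smb2.findall(sample_output)))
# e.g. {'syscall_sendfile_bytes': '1048576', ..., 'smb2_create_count': '42'}
```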
diff --git a/src/collectors/python.d.plugin/samba/samba.conf b/src/collectors/python.d.plugin/samba/samba.conf
deleted file mode 100644
index db15d4e9e..000000000
--- a/src/collectors/python.d.plugin/samba/samba.conf
+++ /dev/null
@@ -1,60 +0,0 @@
-# netdata python.d.plugin configuration for samba
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/spigotmc/README.md b/src/collectors/python.d.plugin/spigotmc/README.md
deleted file mode 120000
index 66e5c9c47..000000000
--- a/src/collectors/python.d.plugin/spigotmc/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/spigotmc.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md b/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
deleted file mode 100644
index 2e5e60669..000000000
--- a/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
+++ /dev/null
@@ -1,250 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/spigotmc/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/spigotmc/metadata.yaml"
-sidebar_label: "SpigotMC"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Gaming"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# SpigotMC
-
-
-<img src="https://netdata.cloud/img/spigot.jfif" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: spigotmc
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.
-
-
-It sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses.
-
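For context, the raw responses being parsed look roughly like the samples kept as comments next to the module's regexes, for example:

```
§6TPS from last 1m, 5m, 15m: §a*20.0, §a*20.0, §a*20.0
There are 4 of a max 50 players online: player1, player2, player3, player4
```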
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per SpigotMC instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| spigotmc.tps | 1 Minute Average, 5 Minute Average, 15 Minute Average | ticks |
-| spigotmc.users | Users | users |
-| spigotmc.mem | used, allocated, max | MiB |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Enable the Remote Console Protocol
-
-Under your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`.
-
-This will allow the Server to listen and respond to queries over the rcon protocol.
-
-
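A minimal `server.properties` excerpt with the remote console enabled might look like this (key names as in a stock Spigot/Minecraft install; pick your own password):

```ini
enable-rcon=true
rcon.port=25575
rcon.password=<your-password>
```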
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/spigotmc.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/spigotmc.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| host | The host's IP to connect to. | localhost | yes |
-| port | The port the remote console is listening on. | 25575 | yes |
-| password | Remote console password if any. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-local:
- name: local_server
- url: 127.0.0.1
- port: 25575
-
-```
-##### Basic Authentication
-
-An example using basic password for authentication with the remote console.
-
-<details open><summary>Config</summary>
-
-```yaml
-local:
- name: local_server_pass
- url: 127.0.0.1
- port: 25575
- password: 'foobar'
-
-```
-</details>
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-local_server:
- name : my_local_server
- url : 127.0.0.1
- port: 25575
-
-remote_server:
- name : another_remote_server
- url : 192.0.2.1
- port: 25575
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin spigotmc debug trace
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `spigotmc` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep spigotmc
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
-
-```bash
-grep spigotmc /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep spigotmc
-```
-
-
diff --git a/src/collectors/python.d.plugin/spigotmc/metadata.yaml b/src/collectors/python.d.plugin/spigotmc/metadata.yaml
deleted file mode 100644
index 5dea9f0c8..000000000
--- a/src/collectors/python.d.plugin/spigotmc/metadata.yaml
+++ /dev/null
@@ -1,176 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: spigotmc
- monitored_instance:
- name: SpigotMC
- link: ""
- categories:
- - data-collection.gaming
- icon_filename: "spigot.jfif"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - minecraft server
- - spigotmc server
- - spigot
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors SpigotMC server performance, in the form of ticks per second average, memory utilization, and active users.
- method_description: |
- It sends the `tps`, `list` and `online` commands to the Server, and gathers the metrics from the responses.
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: By default, this collector will attempt to connect to a Spigot server running on the local host on port `25575`.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Enable the Remote Console Protocol
- description: |
- Under your SpigotMC server's `server.properties` configuration file, you should set `enable-rcon` to `true`.
-
- This will allow the Server to listen and respond to queries over the rcon protocol.
- configuration:
- file:
- name: "python.d/spigotmc.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed
- running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: host
- description: The host's IP to connect to.
- default_value: localhost
- required: true
- - name: port
- description: The port the remote console is listening on.
- default_value: 25575
- required: true
- - name: password
- description: Remote console password if any.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic configuration example.
- folding:
- enabled: false
- config: |
- local:
- name: local_server
- url: 127.0.0.1
- port: 25575
- - name: Basic Authentication
- description: An example using basic password for authentication with the remote console.
- config: |
- local:
- name: local_server_pass
- url: 127.0.0.1
- port: 25575
- password: 'foobar'
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- local_server:
- name : my_local_server
- url : 127.0.0.1
- port: 25575
-
- remote_server:
- name : another_remote_server
- url : 192.0.2.1
- port: 25575
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: spigotmc.tps
- description: Spigot Ticks Per Second
- unit: "ticks"
- chart_type: line
- dimensions:
- - name: 1 Minute Average
- - name: 5 Minute Average
- - name: 15 Minute Average
- - name: spigotmc.users
- description: Minecraft Users
- unit: "users"
- chart_type: area
- dimensions:
- - name: Users
- - name: spigotmc.mem
- description: Minecraft Memory Usage
- unit: "MiB"
- chart_type: line
- dimensions:
- - name: used
- - name: allocated
- - name: max
diff --git a/src/collectors/python.d.plugin/spigotmc/spigotmc.chart.py b/src/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
deleted file mode 100644
index 81370fb4c..000000000
--- a/src/collectors/python.d.plugin/spigotmc/spigotmc.chart.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: spigotmc netdata python.d module
-# Author: Austin S. Hemmelgarn (Ferroin)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import platform
-import re
-import socket
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from third_party import mcrcon
-
-# Update only every 5 seconds because collection takes in excess of
-# 100ms sometimes, and most people won't care about second-by-second data.
-update_every = 5
-
-PRECISION = 100
-
-COMMAND_TPS = 'tps'
-COMMAND_LIST = 'list'
-COMMAND_ONLINE = 'online'
-
-ORDER = [
- 'tps',
- 'mem',
- 'users',
-]
-
-CHARTS = {
- 'tps': {
- 'options': [None, 'Spigot Ticks Per Second', 'ticks', 'spigotmc', 'spigotmc.tps', 'line'],
- 'lines': [
- ['tps1', '1 Minute Average', 'absolute', 1, PRECISION],
- ['tps5', '5 Minute Average', 'absolute', 1, PRECISION],
- ['tps15', '15 Minute Average', 'absolute', 1, PRECISION]
- ]
- },
- 'users': {
- 'options': [None, 'Minecraft Users', 'users', 'spigotmc', 'spigotmc.users', 'area'],
- 'lines': [
- ['users', 'Users', 'absolute', 1, 1]
- ]
- },
- 'mem': {
- 'options': [None, 'Minecraft Memory Usage', 'MiB', 'spigotmc', 'spigotmc.mem', 'line'],
- 'lines': [
- ['mem_used', 'used', 'absolute', 1, 1],
- ['mem_alloc', 'allocated', 'absolute', 1, 1],
- ['mem_max', 'max', 'absolute', 1, 1]
- ]
- }
-}
-
-_TPS_REGEX = re.compile(
- # Examples:
- # §6TPS from last 1m, 5m, 15m: §a*20.0, §a*20.0, §a*20.0
- # §6Current Memory Usage: §a936/65536 mb (Max: 65536 mb)
- r'^.*: .*?' # Message lead-in
- r'(\d{1,2}.\d+), .*?' # 1-minute TPS value
- r'(\d{1,2}.\d+), .*?' # 5-minute TPS value
- r'(\d{1,2}\.\d+).*?' # 15-minute TPS value
- r'(\s.*?(\d+)\/(\d+).*?: (\d+).*)?', # Current Memory Usage / Total Memory (Max Memory)
- re.MULTILINE
-)
-_LIST_REGEX = re.compile(
- # Examples:
- # There are 4 of a max 50 players online: player1, player2, player3, player4
- # §6There are §c4§6 out of maximum §c50§6 players online.
- # §6There are §c3§6/§c1§6 out of maximum §c50§6 players online.
- # §6当前有 §c4§6 个玩家在线,最大在线人数为 §c50§6 个玩家.
- # §c4§6 人のプレイヤーが接続中です。最大接続可能人数\:§c 50
- r'[^§](\d+)(?:.*?(?=/).*?[^§](\d+))?', # Current user count.
- re.X
-)
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 25575)
- self.password = self.configuration.get('password', '')
- self.console = mcrcon.MCRcon()
- self.alive = True
-
- def check(self):
- if platform.system() != 'Linux':
- self.error('Only supported on Linux.')
- return False
- try:
- self.connect()
- except (mcrcon.MCRconException, socket.error) as err:
- self.error('Error connecting.')
- self.error(repr(err))
- return False
-
- return self._get_data()
-
- def connect(self):
- self.console.connect(self.host, self.port, self.password)
-
- def reconnect(self):
- self.error('try reconnect.')
- try:
- try:
- self.console.disconnect()
- except mcrcon.MCRconException:
- pass
- self.console.connect(self.host, self.port, self.password)
- self.alive = True
- except (mcrcon.MCRconException, socket.error) as err:
- self.error('Error connecting.')
- self.error(repr(err))
- return False
- return True
-
- def is_alive(self):
- if any(
- [
- not self.alive,
- self.console.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 0) != 1
- ]
- ):
- return self.reconnect()
- return True
-
- def _get_data(self):
- if not self.is_alive():
- return None
-
- data = {}
-
- try:
- raw = self.console.command(COMMAND_TPS)
- match = _TPS_REGEX.match(raw)
- if match:
- data['tps1'] = int(float(match.group(1)) * PRECISION)
- data['tps5'] = int(float(match.group(2)) * PRECISION)
- data['tps15'] = int(float(match.group(3)) * PRECISION)
- if match.group(4):
- data['mem_used'] = int(match.group(5))
- data['mem_alloc'] = int(match.group(6))
- data['mem_max'] = int(match.group(7))
- else:
- self.error('Unable to process TPS values.')
- if not raw:
- self.error(
- "'{0}' command returned no value, make sure you set correct password".format(COMMAND_TPS))
- except mcrcon.MCRconException:
- self.error('Unable to fetch TPS values.')
- except socket.error:
- self.error('Connection is dead.')
- self.alive = False
- return None
-
- try:
- raw = self.console.command(COMMAND_LIST)
- match = _LIST_REGEX.search(raw)
- if not match:
- raw = self.console.command(COMMAND_ONLINE)
- match = _LIST_REGEX.search(raw)
- if match:
- users = int(match.group(1))
- hidden_users = match.group(2)
- if hidden_users:
- hidden_users = int(hidden_users)
- else:
- hidden_users = 0
- data['users'] = users + hidden_users
- else:
- if not raw:
- self.error("'{0}' and '{1}' commands returned no value, make sure you set correct password".format(
- COMMAND_LIST, COMMAND_ONLINE))
- self.error('Unable to process user counts.')
- except mcrcon.MCRconException:
- self.error('Unable to fetch user counts.')
- except socket.error:
- self.error('Connection is dead.')
- self.alive = False
- return None
-
- return data
diff --git a/src/collectors/python.d.plugin/spigotmc/spigotmc.conf b/src/collectors/python.d.plugin/spigotmc/spigotmc.conf
deleted file mode 100644
index f0064ea2f..000000000
--- a/src/collectors/python.d.plugin/spigotmc/spigotmc.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# netdata python.d.plugin configuration for spigotmc
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, spigotmc supports the following:
-#
-# host: localhost # The host to connect to. Defaults to the local system.
-# port: 25575 # The port the remote console is listening on.
-# password: '' # The remote console password. Must be set correctly.
diff --git a/src/collectors/python.d.plugin/traefik/README.md b/src/collectors/python.d.plugin/traefik/README.md
index 079f309c7..f4574051a 100644
--- a/src/collectors/python.d.plugin/traefik/README.md
+++ b/src/collectors/python.d.plugin/traefik/README.md
@@ -1,12 +1,3 @@
-<!--
-title: "Traefik monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/traefik/README.md"
-sidebar_label: "traefik-python.d.plugin"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Webapps"
--->
-
# Traefik collector
Uses the `health` API to provide statistics.
diff --git a/src/collectors/python.d.plugin/varnish/README.md b/src/collectors/python.d.plugin/varnish/README.md
deleted file mode 120000
index 194be2335..000000000
--- a/src/collectors/python.d.plugin/varnish/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/varnish.md
\ No newline at end of file
diff --git a/src/collectors/python.d.plugin/varnish/integrations/varnish.md b/src/collectors/python.d.plugin/varnish/integrations/varnish.md
deleted file mode 100644
index 5850dcc4c..000000000
--- a/src/collectors/python.d.plugin/varnish/integrations/varnish.md
+++ /dev/null
@@ -1,247 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/varnish/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/varnish/metadata.yaml"
-sidebar_label: "Varnish"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Varnish
-
-
-<img src="https://netdata.cloud/img/varnish.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: varnish
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Varnish metrics covering global HTTP accelerator statistics, Backends (VBE) and Storages (SMF, SMA, MSE).
-
-Note that both Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.
-
-
-It uses the `varnishstat` tool in order to collect the metrics.
-
-
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-`netdata` user must be a member of the `varnish` group.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Varnish instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| varnish.session_connection | accepted, dropped | connections/s |
-| varnish.client_requests | received | requests/s |
-| varnish.all_time_hit_rate | hit, miss, hitpass | percentage |
-| varnish.current_poll_hit_rate | hit, miss, hitpass | percentage |
-| varnish.cached_objects_expired | objects | expired/s |
-| varnish.cached_objects_nuked | objects | nuked/s |
-| varnish.threads_total | None | number |
-| varnish.threads_statistics | created, failed, limited | threads/s |
-| varnish.threads_queue_len | in queue | requests |
-| varnish.backend_connections | successful, unhealthy, reused, closed, recycled, failed | connections/s |
-| varnish.backend_requests | sent | requests/s |
-| varnish.esi_statistics | errors, warnings | problems/s |
-| varnish.memory_usage | free, allocated | MiB |
-| varnish.uptime | uptime | seconds |
-
-### Per Backend
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| varnish.backend | header, body | kilobits/s |
-
-### Per Storage
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| varnish.storage_usage | free, allocated | KiB |
-| varnish.storage_alloc_objs | allocated | objects |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Provide the necessary permissions
-
-In order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:
-
-```
-usermod -aG varnish netdata
-```
-
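Afterwards you can verify the membership and restart Netdata so the new group takes effect (a quick sanity check, assuming standard tooling and a systemd service named `netdata`):

```bash
groups netdata                  # should now include 'varnish'
sudo systemctl restart netdata
```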
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/varnish.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/varnish.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |
-| update_every | Sets the default data collection frequency. | 10 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-An example configuration.
-
-```yaml
-job_name:
- instance_name: '<name-of-varnishd-instance>'
-
-```
-
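Before enabling the job, you can run the same one-shot command the collector uses and confirm it produces output (a sketch; add `-n <name-of-varnishd-instance>` only if you set `instance_name`):

```bash
sudo -u netdata varnishstat -1
```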
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin varnish debug trace
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `varnish` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep varnish
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
-
-```bash
-grep varnish /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep varnish
-```
-
-
diff --git a/src/collectors/python.d.plugin/varnish/metadata.yaml b/src/collectors/python.d.plugin/varnish/metadata.yaml
deleted file mode 100644
index d31c1cf6f..000000000
--- a/src/collectors/python.d.plugin/varnish/metadata.yaml
+++ /dev/null
@@ -1,253 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: varnish
- monitored_instance:
- name: Varnish
- link: https://varnish-cache.org/
- categories:
- - data-collection.web-servers-and-web-proxies
- icon_filename: 'varnish.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - varnish
- - varnishstat
- - varnishd
- - cache
- - web server
- - web cache
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors Varnish metrics about HTTP accelerator global, Backends (VBE) and Storages (SMF, SMA, MSE) statistics.
-
- Note that both, Varnish-Cache (free and open source) and Varnish-Plus (Commercial/Enterprise version), are supported.
- method_description: |
- It uses the `varnishstat` tool in order to collect the metrics.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: |
- `netdata` user must be a member of the `varnish` group.
- default_behavior:
- auto_detection:
- description: By default, if the permissions are satisfied, the `varnishstat` tool will be executed on the host.
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: Provide the necessary permissions
- description: |
- In order for the collector to work, you need to add the `netdata` user to the `varnish` user group, so that it can execute the `varnishstat` tool:
-
- ```
- usermod -aG varnish netdata
- ```
- configuration:
- file:
- name: python.d/varnish.conf
- description: ''
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: instance_name
- description: the name of the varnishd instance to get logs from. If not specified, the local host name is used.
- default_value: ""
- required: true
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 10
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- examples:
- folding:
- enabled: true
- title: 'Config'
- list:
- - name: Basic
- description: An example configuration.
- folding:
- enabled: false
- config: |
- job_name:
- instance_name: '<name-of-varnishd-instance>'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: varnish.session_connection
- description: Connections Statistics
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: accepted
- - name: dropped
- - name: varnish.client_requests
- description: Client Requests
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: received
- - name: varnish.all_time_hit_rate
- description: All History Hit Rate Ratio
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hit
- - name: miss
- - name: hitpass
- - name: varnish.current_poll_hit_rate
- description: Current Poll Hit Rate Ratio
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: hit
- - name: miss
- - name: hitpass
- - name: varnish.cached_objects_expired
- description: Expired Objects
- unit: "expired/s"
- chart_type: line
- dimensions:
- - name: objects
- - name: varnish.cached_objects_nuked
- description: Least Recently Used Nuked Objects
- unit: "nuked/s"
- chart_type: line
- dimensions:
- - name: objects
- - name: varnish.threads_total
- description: Number Of Threads In All Pools
- unit: "number"
- chart_type: line
- dimensions:
- - name: None
- - name: varnish.threads_statistics
- description: Threads Statistics
- unit: "threads/s"
- chart_type: line
- dimensions:
- - name: created
- - name: failed
- - name: limited
- - name: varnish.threads_queue_len
- description: Current Queue Length
- unit: "requests"
- chart_type: line
- dimensions:
- - name: in queue
- - name: varnish.backend_connections
- description: Backend Connections Statistics
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: successful
- - name: unhealthy
- - name: reused
- - name: closed
- - name: recycled
- - name: failed
- - name: varnish.backend_requests
- description: Requests To The Backend
- unit: "requests/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: varnish.esi_statistics
- description: ESI Statistics
- unit: "problems/s"
- chart_type: line
- dimensions:
- - name: errors
- - name: warnings
- - name: varnish.memory_usage
- description: Memory Usage
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: allocated
- - name: varnish.uptime
- description: Uptime
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: uptime
- - name: Backend
- description: ""
- labels: []
- metrics:
- - name: varnish.backend
- description: Backend {backend_name}
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: header
- - name: body
- - name: Storage
- description: ""
- labels: []
- metrics:
- - name: varnish.storage_usage
- description: Storage {storage_name} Usage
- unit: "KiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: allocated
- - name: varnish.storage_alloc_objs
- description: Storage {storage_name} Allocated Objects
- unit: "objects"
- chart_type: line
- dimensions:
- - name: allocated
diff --git a/src/collectors/python.d.plugin/varnish/varnish.chart.py b/src/collectors/python.d.plugin/varnish/varnish.chart.py
deleted file mode 100644
index 506ad026a..000000000
--- a/src/collectors/python.d.plugin/varnish/varnish.chart.py
+++ /dev/null
@@ -1,385 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: varnish netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import re
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-from bases.collection import find_binary
-
-ORDER = [
- 'session_connections',
- 'client_requests',
- 'all_time_hit_rate',
- 'current_poll_hit_rate',
- 'cached_objects_expired',
- 'cached_objects_nuked',
- 'threads_total',
- 'threads_statistics',
- 'threads_queue_len',
- 'backend_connections',
- 'backend_requests',
- 'esi_statistics',
- 'memory_usage',
- 'uptime'
-]
-
-CHARTS = {
- 'session_connections': {
- 'options': [None, 'Connections Statistics', 'connections/s',
- 'client metrics', 'varnish.session_connection', 'line'],
- 'lines': [
- ['sess_conn', 'accepted', 'incremental'],
- ['sess_dropped', 'dropped', 'incremental']
- ]
- },
- 'client_requests': {
- 'options': [None, 'Client Requests', 'requests/s',
- 'client metrics', 'varnish.client_requests', 'line'],
- 'lines': [
- ['client_req', 'received', 'incremental']
- ]
- },
- 'all_time_hit_rate': {
- 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
- 'varnish.all_time_hit_rate', 'stacked'],
- 'lines': [
- ['cache_hit', 'hit', 'percentage-of-absolute-row'],
- ['cache_miss', 'miss', 'percentage-of-absolute-row'],
- ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
- },
- 'current_poll_hit_rate': {
- 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
- 'varnish.current_poll_hit_rate', 'stacked'],
- 'lines': [
- ['cache_hit', 'hit', 'percentage-of-incremental-row'],
- ['cache_miss', 'miss', 'percentage-of-incremental-row'],
- ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row']
- ]
- },
- 'cached_objects_expired': {
- 'options': [None, 'Expired Objects', 'expired/s', 'cache performance',
- 'varnish.cached_objects_expired', 'line'],
- 'lines': [
- ['n_expired', 'objects', 'incremental']
- ]
- },
- 'cached_objects_nuked': {
- 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance',
- 'varnish.cached_objects_nuked', 'line'],
- 'lines': [
- ['n_lru_nuked', 'objects', 'incremental']
- ]
- },
- 'threads_total': {
- 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics',
- 'varnish.threads_total', 'line'],
- 'lines': [
- ['threads', None, 'absolute']
- ]
- },
- 'threads_statistics': {
- 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics',
- 'varnish.threads_statistics', 'line'],
- 'lines': [
- ['threads_created', 'created', 'incremental'],
- ['threads_failed', 'failed', 'incremental'],
- ['threads_limited', 'limited', 'incremental']
- ]
- },
- 'threads_queue_len': {
- 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics',
- 'varnish.threads_queue_len', 'line'],
- 'lines': [
- ['thread_queue_len', 'in queue']
- ]
- },
- 'backend_connections': {
- 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics',
- 'varnish.backend_connections', 'line'],
- 'lines': [
- ['backend_conn', 'successful', 'incremental'],
- ['backend_unhealthy', 'unhealthy', 'incremental'],
- ['backend_reuse', 'reused', 'incremental'],
- ['backend_toolate', 'closed', 'incremental'],
- ['backend_recycle', 'recycled', 'incremental'],
- ['backend_fail', 'failed', 'incremental']
- ]
- },
- 'backend_requests': {
- 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
- 'varnish.backend_requests', 'line'],
- 'lines': [
- ['backend_req', 'sent', 'incremental']
- ]
- },
- 'esi_statistics': {
- 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
- 'lines': [
- ['esi_errors', 'errors', 'incremental'],
- ['esi_warnings', 'warnings', 'incremental']
- ]
- },
- 'memory_usage': {
- 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
- 'lines': [
- ['memory_free', 'free', 'absolute', 1, 1 << 20],
- ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
- },
- 'uptime': {
- 'lines': [
- ['uptime', None, 'absolute']
- ],
- 'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
- }
-}
-
-
-def backend_charts_template(name):
- order = [
- '{0}_response_statistics'.format(name),
- ]
-
- charts = {
- order[0]: {
- 'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s', 'backend response statistics',
- 'varnish.backend', 'area'],
- 'lines': [
- ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
- ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000]
- ]
- },
- }
-
- return order, charts
-
-
-def storage_charts_template(name):
- order = [
- 'storage_{0}_usage'.format(name),
- 'storage_{0}_alloc_objs'.format(name)
- ]
-
- charts = {
- order[0]: {
- 'options': [None, 'Storage "{0}" Usage'.format(name), 'KiB', 'storage usage', 'varnish.storage_usage', 'stacked'],
- 'lines': [
- ['{0}.g_space'.format(name), 'free', 'absolute', 1, 1 << 10],
- ['{0}.g_bytes'.format(name), 'allocated', 'absolute', 1, 1 << 10]
- ]
- },
- order[1]: {
- 'options': [None, 'Storage "{0}" Allocated Objects'.format(name), 'objects', 'storage usage', 'varnish.storage_alloc_objs', 'line'],
- 'lines': [
- ['{0}.g_alloc'.format(name), 'allocated', 'absolute']
- ]
- }
- }
-
- return order, charts
-
-
-VARNISHSTAT = 'varnishstat'
-
-re_version = re.compile(r'varnish-(?:plus-)?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
-
-
-class VarnishVersion:
- def __init__(self, major, minor, patch):
- self.major = major
- self.minor = minor
- self.patch = patch
-
- def __str__(self):
- return '{0}.{1}.{2}'.format(self.major, self.minor, self.patch)
-
-
-class Parser:
- _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
- _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_-]+).(beresp[\w_]+)\s+(\d+)')
- _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
-
- def __init__(self):
- self.re_default = None
- self.re_backend = None
-
- def init(self, data):
- data = ''.join(data)
- parsed_main = Parser._default.findall(data)
- if parsed_main:
- self.re_default = Parser._default
-
- parsed_backend = Parser._backend_new.findall(data)
- if parsed_backend:
- self.re_backend = Parser._backend_new
- else:
- parsed_backend = Parser._backend_old.findall(data)
- if parsed_backend:
- self.re_backend = Parser._backend_old
-
- def server_stats(self, data):
- return self.re_default.findall(''.join(data))
-
- def backend_stats(self, data):
- return self.re_backend.findall(''.join(data))
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.instance_name = configuration.get('instance_name')
- self.parser = Parser()
- self.command = None
- self.collected_vbe = set()
- self.collected_storages = set()
-
- def create_command(self):
- varnishstat = find_binary(VARNISHSTAT)
-
- if not varnishstat:
- self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
- return False
-
- command = [varnishstat, '-V']
- reply = self._get_raw_data(stderr=True, command=command)
- if not reply:
- self.error(
-                "no output from '{0}'. Is varnish running? Not enough privileges?".format(' '.join(command)))
- return False
-
- ver = parse_varnish_version(reply)
- if not ver:
- self.error("failed to parse reply from '{0}', used regex :'{1}', reply : {2}".format(
- ' '.join(command), re_version.pattern, reply))
- return False
-
- if self.instance_name:
- self.command = [varnishstat, '-1', '-n', self.instance_name]
- else:
- self.command = [varnishstat, '-1']
-
- if ver.major > 4:
- self.command.extend(['-t', '1'])
-
- self.info("varnish version: {0}, will use command: '{1}'".format(ver, ' '.join(self.command)))
-
- return True
-
- def check(self):
- if not self.create_command():
- return False
-
- # STDOUT is not empty
- reply = self._get_raw_data()
- if not reply:
- self.error("no output from '{0}'. Is it running? Not enough privileges?".format(' '.join(self.command)))
- return False
-
- self.parser.init(reply)
-
- # Output is parsable
- if not self.parser.re_default:
- self.error('cant parse the output...')
- return False
-
- return True
-
- def get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- raw = self._get_raw_data()
- if not raw:
- return None
-
- data = dict()
- server_stats = self.parser.server_stats(raw)
- if not server_stats:
- return None
-
- stats = dict((param, value) for _, param, value in server_stats)
- data.update(stats)
-
- self.get_vbe_backends(data, raw)
- self.get_storages(server_stats)
-
- # varnish 5 uses default.g_bytes and default.g_space
- data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
- data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
-
- return data
-
- def get_vbe_backends(self, data, raw):
- if not self.parser.re_backend:
- return
- stats = self.parser.backend_stats(raw)
- if not stats:
- return
-
- for (name, param, value) in stats:
- data['_'.join([name, param])] = value
- if name in self.collected_vbe:
- continue
- self.collected_vbe.add(name)
- self.add_backend_charts(name)
-
- def get_storages(self, server_stats):
- # Storage types:
- # - SMF: File Storage
- # - SMA: Malloc Storage
- # - MSE: Massive Storage Engine (Varnish-Plus only)
- #
- # Stats example:
- # [('SMF.', 'ssdStorage.c_req', '47686'),
- # ('SMF.', 'ssdStorage.c_fail', '0'),
- # ('SMF.', 'ssdStorage.c_bytes', '668102656'),
- # ('SMF.', 'ssdStorage.c_freed', '140980224'),
- # ('SMF.', 'ssdStorage.g_alloc', '39753'),
- # ('SMF.', 'ssdStorage.g_bytes', '527122432'),
- # ('SMF.', 'ssdStorage.g_space', '53159968768'),
- # ('SMF.', 'ssdStorage.g_smf', '40130'),
- # ('SMF.', 'ssdStorage.g_smf_frag', '311'),
- # ('SMF.', 'ssdStorage.g_smf_large', '66')]
- storages = [name for typ, name, _ in server_stats if typ.startswith(('SMF', 'SMA', 'MSE')) and name.endswith('g_space')]
- if not storages:
- return
- for storage in storages:
- storage = storage.split('.')[0]
- if storage in self.collected_storages:
- continue
- self.collected_storages.add(storage)
- self.add_storage_charts(storage)
-
- def add_backend_charts(self, backend_name):
- self.add_charts(backend_name, backend_charts_template)
-
- def add_storage_charts(self, storage_name):
- self.add_charts(storage_name, storage_charts_template)
-
- def add_charts(self, name, charts_template):
- order, charts = charts_template(name)
-
- for chart_name in order:
- params = [chart_name] + charts[chart_name]['options']
- dimensions = charts[chart_name]['lines']
-
- new_chart = self.charts.add_chart(params)
- for dimension in dimensions:
- new_chart.add_dimension(dimension)
-
-
-def parse_varnish_version(lines):
- m = re_version.search(lines[0])
- if not m:
- return None
-
- m = m.groupdict()
- return VarnishVersion(
- int(m['major']),
- int(m['minor']),
- int(m['patch']),
- )
diff --git a/src/collectors/python.d.plugin/varnish/varnish.conf b/src/collectors/python.d.plugin/varnish/varnish.conf
deleted file mode 100644
index 54bfe4dee..000000000
--- a/src/collectors/python.d.plugin/varnish/varnish.conf
+++ /dev/null
@@ -1,66 +0,0 @@
-# netdata python.d.plugin configuration for varnish
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, varnish also supports the following:
-#
-# instance_name: 'name' # the name of the varnishd instance to get logs from. If not specified, the host name is used.
-#
-# ----------------------------------------------------------------------
diff --git a/src/collectors/python.d.plugin/w1sensor/README.md b/src/collectors/python.d.plugin/w1sensor/README.md
deleted file mode 120000
index c0fa9cd1b..000000000
--- a/src/collectors/python.d.plugin/w1sensor/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/1-wire_sensors.md
\ No newline at end of file
diff --git a/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md b/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
deleted file mode 100644
index 15582879e..000000000
--- a/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
+++ /dev/null
@@ -1,201 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/w1sensor/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/w1sensor/metadata.yaml"
-sidebar_label: "1-Wire Sensors"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# 1-Wire Sensors
-
-
-<img src="https://netdata.cloud/img/1-wire.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: w1sensor
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts.
-
-The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected.
-
-This collector is only supported on the following platforms:
-
-- Linux
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The collector will try to auto detect available 1-Wire devices.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per 1-Wire Sensors instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| w1sensor.temp | a dimension per sensor | Celsius |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Required Linux kernel modules
-
-Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded.
-
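On most distributions the modules can be loaded manually for a quick test (a sketch; module names taken from the prerequisite above):

```bash
sudo modprobe -a wire w1_gpio w1_therm
lsmod | grep -E '^(wire|w1_gpio|w1_therm)'
```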
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/w1sensor.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/w1sensor.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |
-
-</details>
-
-#### Examples
-
-##### Provide human readable names
-
-Associate two 1-Wire identifiers with human readable names.
-
-```yaml
-sensors:
- name_00000022276e: 'Machine room'
- name_00000022298f: 'Rack 12'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin w1sensor debug trace
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep w1sensor
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
-
-```bash
-grep w1sensor /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep w1sensor
-```
-
-
diff --git a/src/collectors/python.d.plugin/w1sensor/metadata.yaml b/src/collectors/python.d.plugin/w1sensor/metadata.yaml
deleted file mode 100644
index 7b0768237..000000000
--- a/src/collectors/python.d.plugin/w1sensor/metadata.yaml
+++ /dev/null
@@ -1,119 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: w1sensor
- monitored_instance:
- name: 1-Wire Sensors
- link: "https://www.analog.com/en/product-category/1wire-temperature-sensors.html"
- categories:
- - data-collection.hardware-devices-and-sensors
- icon_filename: "1-wire.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - temperature
- - sensor
- - 1-wire
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor 1-Wire Sensors metrics with Netdata for optimal environmental conditions monitoring. Enhance your environmental monitoring with real-time insights and alerts."
- method_description: "The collector uses the wire, w1_gpio, and w1_therm kernel modules. Currently temperature sensors are supported and automatically detected."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "The collector will try to auto detect available 1-Wire devices."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Required Linux kernel modules"
- description: "Make sure `wire`, `w1_gpio`, and `w1_therm` kernel modules are loaded."
- configuration:
- file:
- name: python.d/w1sensor.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: name_<1-Wire id>
- description: This allows associating a human readable name with a sensor's 1-Wire identifier.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Provide human readable names
- description: Associate two 1-Wire identifiers with human readable names.
- config: |
- sensors:
- name_00000022276e: 'Machine room'
- name_00000022298f: 'Rack 12'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: w1sensor.temp
- description: 1-Wire Temperature Sensor
- unit: "Celsius"
- chart_type: line
- dimensions:
- - name: a dimension per sensor
diff --git a/src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
deleted file mode 100644
index 66797ced3..000000000
--- a/src/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: 1-wire temperature monitor netdata python.d module
-# Author: Diomidis Spinellis <http://www.spinellis.gr>
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import re
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# default module values (can be overridden per job in `config`)
-update_every = 5
-
-# Location where 1-Wire devices can be found
-W1_DIR = '/sys/bus/w1/devices/'
-
-# Lines matching the following regular expression contain a temperature value
-RE_TEMP = re.compile(r' t=(-?\d+)')
-
-ORDER = [
- 'temp',
-]
-
-CHARTS = {
- 'temp': {
- 'options': [None, '1-Wire Temperature Sensor', 'Celsius', 'Temperature', 'w1sensor.temp', 'line'],
- 'lines': []
- }
-}
-
-# Known and supported family members
-# Based on linux/drivers/w1/w1_family.h and w1/slaves/w1_therm.c
-THERM_FAMILY = {
- '10': 'W1_THERM_DS18S20',
- '22': 'W1_THERM_DS1822',
- '28': 'W1_THERM_DS18B20',
- '3b': 'W1_THERM_DS1825',
- '42': 'W1_THERM_DS28EA00',
-}
-
-
-class Service(SimpleService):
- """Provide netdata service for 1-Wire sensors"""
-
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.probes = []
-
- def check(self):
- """Auto-detect available 1-Wire sensors, setting line definitions
- and probes to be monitored."""
- try:
- file_names = os.listdir(W1_DIR)
- except OSError as err:
- self.error(err)
- return False
-
- lines = []
- for file_name in file_names:
- if file_name[2] != '-':
- continue
- if not file_name[0:2] in THERM_FAMILY:
- continue
-
- self.probes.append(file_name)
- identifier = file_name[3:]
- name = identifier
- config_name = self.configuration.get('name_' + identifier)
- if config_name:
- name = config_name
- lines.append(['w1sensor_temp_' + identifier, name, 'absolute',
- 1, 10])
- self.definitions['temp']['lines'] = lines
- return len(self.probes) > 0
-
- def get_data(self):
- """Return data read from sensors."""
- data = dict()
-
- for file_name in self.probes:
- file_path = W1_DIR + file_name + '/w1_slave'
- identifier = file_name[3:]
- try:
- with open(file_path, 'r') as device_file:
- for line in device_file:
- matched = RE_TEMP.search(line)
- if matched:
- # Round to one decimal digit to filter-out noise
- value = round(int(matched.group(1)) / 1000., 1)
- value = int(value * 10)
- data['w1sensor_temp_' + identifier] = value
- except (OSError, IOError) as err:
- self.error(err)
- continue
- return data or None
diff --git a/src/collectors/python.d.plugin/w1sensor/w1sensor.conf b/src/collectors/python.d.plugin/w1sensor/w1sensor.conf
deleted file mode 100644
index b60d28650..000000000
--- a/src/collectors/python.d.plugin/w1sensor/w1sensor.conf
+++ /dev/null
@@ -1,72 +0,0 @@
-# netdata python.d.plugin configuration for w1sensor
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 5 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, w1sensor also supports the following:
-#
-# name_<1-Wire id>: '<human readable name>'
-# This allows associating a human readable name with a sensor's 1-Wire
-# identifier. Example:
-# name_00000022276e: 'Machine room'
-# name_00000022298f: 'Rack 12'
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
diff --git a/src/collectors/python.d.plugin/zscores/README.md b/src/collectors/python.d.plugin/zscores/README.md
deleted file mode 120000
index 159ce0787..000000000
--- a/src/collectors/python.d.plugin/zscores/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/python.d_zscores.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md b/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
deleted file mode 100644
index a5d2a7e47..000000000
--- a/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
+++ /dev/null
@@ -1,229 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/zscores/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/zscores/metadata.yaml"
-sidebar_label: "python.d zscores"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Other"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# python.d zscores
-
-Plugin: python.d.plugin
-Module: zscores
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.
-
-
-This collector uses the [Netdata REST API](/src/web/api/README.md) to get the `mean` and `stddev`
-for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).
-
-For each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over
-time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.
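
As an editorial sketch (not part of the collector), the snippet below shows the clip-then-smooth shape of the calculation described above, assuming `numpy` is available; the function name and defaults are illustrative only.

```python
import numpy as np

def smoothed_zscores(values, train, z_clip=10, z_smooth_n=15, z_abs=True):
    """Clipped z-scores for `values`, smoothed by averaging the last z_smooth_n scores."""
    mean = np.mean(train)
    std = np.std(train) or 1.0                    # guard against a zero stddev
    z = np.clip((np.asarray(values, dtype=float) - mean) / std, -z_clip, z_clip)
    if z_abs:
        z = np.abs(z)
    kernel = np.ones(z_smooth_n) / z_smooth_n     # simple moving average of the scores
    return np.convolve(z, kernel, mode="valid")   # empty if fewer than z_smooth_n values
```

In `per_chart` mode, the per-dimension scores produced this way are then combined (for example averaged) into a single score per chart.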
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per python.d zscores instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| zscores.z | a dimension per chart or dimension | z |
-| zscores.3stddev | a dimension per chart or dimension | count |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Python Requirements
-
-This collector will only work with Python 3 and requires the below packages be installed.
-
-```bash
-# become netdata user
-sudo su -s /bin/bash netdata
-# install required packages
-pip3 install numpy pandas requests netdata-pandas==0.0.38
-```
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/zscores.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/zscores.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| charts_regex | what charts to pull data for - A regex like `system\..*\|` or `system\..*\|apps.cpu\|apps.mem` etc. | system\..* | yes |
-| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |
-| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |
-| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |
-| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |
-| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |
-| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes |
-| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |
-| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |
-| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-
-</details>
-
-#### Examples
-
-##### Default
-
-Default configuration.
-
-```yaml
-local:
- name: 'local'
- host: '127.0.0.1:19999'
- charts_regex: 'system\..*'
- charts_to_exclude: 'system.uptime'
- train_secs: 14400
- offset_secs: 300
- train_every_n: 900
- z_smooth_n: 15
- z_clip: 10
- z_abs: 'true'
- burn_in: 2
- mode: 'per_chart'
- per_chart_agg: 'mean'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-
-To troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin zscores debug trace
- ```
-
-### Getting Logs
-
-If you're encountering problems with the `zscores` collector, follow these steps to retrieve logs and identify potential issues:
-
-- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
-- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
-
-#### System with systemd
-
-Use the following command to view logs generated since the last Netdata service restart:
-
-```bash
-journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep zscores
-```
-
-#### System without systemd
-
-Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
-
-```bash
-grep zscores /var/log/netdata/collector.log
-```
-
-**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
-
-#### Docker Container
-
-If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
-
-```bash
-docker logs netdata 2>&1 | grep zscores
-```
-
-
diff --git a/src/collectors/python.d.plugin/zscores/metadata.yaml b/src/collectors/python.d.plugin/zscores/metadata.yaml
deleted file mode 100644
index e027562ad..000000000
--- a/src/collectors/python.d.plugin/zscores/metadata.yaml
+++ /dev/null
@@ -1,187 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: zscores
- monitored_instance:
- name: python.d zscores
- link: https://en.wikipedia.org/wiki/Standard_score
- categories:
- - data-collection.other
- icon_filename: ""
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - zscore
- - z-score
- - standard score
- - standard deviation
- - anomaly detection
- - statistical anomaly detection
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- By using smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts you can narrow down your focus and shorten root cause analysis.
- method_description: |
-        This collector uses the [Netdata REST API](/src/web/api/README.md) to get the `mean` and `stddev`
- for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`).
-
- For each dimension it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over
- time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score at each time step.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Python Requirements
- description: |
- This collector will only work with Python 3 and requires the below packages be installed.
-
- ```bash
- # become netdata user
- sudo su -s /bin/bash netdata
- # install required packages
- pip3 install numpy pandas requests netdata-pandas==0.0.38
- ```
- configuration:
- file:
- name: python.d/zscores.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: charts_regex
- description: what charts to pull data for - A regex like `system\..*|` or `system\..*|apps.cpu|apps.mem` etc.
- default_value: "system\\..*"
- required: true
- - name: train_secs
- description: length of time (in seconds) to base calculations off for mean and stddev.
- default_value: 14400
- required: true
- - name: offset_secs
- description: offset (in seconds) preceding latest data to ignore when calculating mean and stddev.
- default_value: 300
- required: true
- - name: train_every_n
- description: recalculate the mean and stddev every n steps of the collector.
- default_value: 900
- required: true
- - name: z_smooth_n
- description: smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values.
- default_value: 15
- required: true
- - name: z_clip
- description: cap absolute value of zscore (before smoothing) for better stability.
- default_value: 10
- required: true
- - name: z_abs
- description: "set z_abs: 'true' to make all zscores be absolute values only."
- default_value: "true"
- required: true
- - name: burn_in
- description: burn in period in which to initially calculate mean and stddev on every step.
- default_value: 2
- required: true
- - name: mode
- description: mode can be to get a zscore 'per_dim' or 'per_chart'.
- default_value: per_chart
- required: true
- - name: per_chart_agg
- description: per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'.
- default_value: mean
- required: true
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Default
- description: Default configuration.
- folding:
- enabled: false
- config: |
- local:
- name: 'local'
- host: '127.0.0.1:19999'
- charts_regex: 'system\..*'
- charts_to_exclude: 'system.uptime'
- train_secs: 14400
- offset_secs: 300
- train_every_n: 900
- z_smooth_n: 15
- z_clip: 10
- z_abs: 'true'
- burn_in: 2
- mode: 'per_chart'
- per_chart_agg: 'mean'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: zscores.z
- description: Z Score
- unit: "z"
- chart_type: line
- dimensions:
- - name: a dimension per chart or dimension
- - name: zscores.3stddev
- description: Z Score >3
- unit: "count"
- chart_type: stacked
- dimensions:
- - name: a dimension per chart or dimension
diff --git a/src/collectors/python.d.plugin/zscores/zscores.chart.py b/src/collectors/python.d.plugin/zscores/zscores.chart.py
deleted file mode 100644
index 1099b9376..000000000
--- a/src/collectors/python.d.plugin/zscores/zscores.chart.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: zscores netdata python.d module
-# Author: andrewm4894
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from datetime import datetime
-import re
-
-import requests
-import numpy as np
-import pandas as pd
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from netdata_pandas.data import get_data, get_allmetrics
-
-priority = 60000
-update_every = 5
-disabled_by_default = True
-
-ORDER = [
- 'z',
- '3stddev'
-]
-
-CHARTS = {
- 'z': {
- 'options': ['z', 'Z Score', 'z', 'Z Score', 'zscores.z', 'line'],
- 'lines': []
- },
- '3stddev': {
- 'options': ['3stddev', 'Z Score >3', 'count', '3 Stddev', 'zscores.3stddev', 'stacked'],
- 'lines': []
- },
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.host = self.configuration.get('host', '127.0.0.1:19999')
- self.charts_regex = re.compile(self.configuration.get('charts_regex', 'system.*'))
- self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
- self.charts_in_scope = [
- c for c in
- list(filter(self.charts_regex.match,
- requests.get(f'http://{self.host}/api/v1/charts').json()['charts'].keys()))
- if c not in self.charts_to_exclude
- ]
- self.train_secs = self.configuration.get('train_secs', 14400)
- self.offset_secs = self.configuration.get('offset_secs', 300)
- self.train_every_n = self.configuration.get('train_every_n', 900)
- self.z_smooth_n = self.configuration.get('z_smooth_n', 15)
- self.z_clip = self.configuration.get('z_clip', 10)
- self.z_abs = bool(self.configuration.get('z_abs', True))
- self.burn_in = self.configuration.get('burn_in', 2)
- self.mode = self.configuration.get('mode', 'per_chart')
- self.per_chart_agg = self.configuration.get('per_chart_agg', 'mean')
- self.order = ORDER
- self.definitions = CHARTS
- self.collected_dims = {'z': set(), '3stddev': set()}
- self.df_mean = pd.DataFrame()
- self.df_std = pd.DataFrame()
- self.df_z_history = pd.DataFrame()
-
- def check(self):
- _ = get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.')
- return True
-
- def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
- """If dimension not in chart then add it.
- """
- for dim in data:
- if dim not in self.collected_dims[chart]:
- self.collected_dims[chart].add(dim)
- self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
-
- for dim in list(self.collected_dims[chart]):
- if dim not in data:
- self.collected_dims[chart].remove(dim)
- self.charts[chart].del_dimension(dim, hide=False)
-
- def train_model(self):
-        """Calculate the mean and stddev for all relevant metrics and store them for use in calculating zscore at each timestep.
- """
- before = int(datetime.now().timestamp()) - self.offset_secs
- after = before - self.train_secs
-
- self.df_mean = get_data(
- self.host, self.charts_in_scope, after, before, points=10, group='average', col_sep='.'
- ).mean().to_frame().rename(columns={0: "mean"})
-
- self.df_std = get_data(
- self.host, self.charts_in_scope, after, before, points=10, group='stddev', col_sep='.'
- ).mean().to_frame().rename(columns={0: "std"})
-
- def create_data(self, df_allmetrics):
- """Use x, mean, stddev to generate z scores and 3stddev flags via some pandas manipulation.
- Returning two dictionaries of dimensions and measures, one for each chart.
-
- :param df_allmetrics <pd.DataFrame>: pandas dataframe with latest data from api/v1/allmetrics.
- :return: (<dict>,<dict>) tuple of dictionaries, one for zscores and the other for a flag if abs(z)>3.
- """
- # calculate clipped z score for each available metric
- df_z = pd.concat([self.df_mean, self.df_std, df_allmetrics], axis=1, join='inner')
- df_z['z'] = ((df_z['value'] - df_z['mean']) / df_z['std']).clip(-self.z_clip, self.z_clip).fillna(0) * 100
- if self.z_abs:
- df_z['z'] = df_z['z'].abs()
-
- # append last z_smooth_n rows of zscores to history table in wide format
- self.df_z_history = self.df_z_history.append(
- df_z[['z']].reset_index().pivot_table(values='z', columns='index'), sort=True
- ).tail(self.z_smooth_n)
-
- # get average zscore for last z_smooth_n for each metric
- df_z_smooth = self.df_z_history.melt(value_name='z').groupby('index')['z'].mean().to_frame()
- df_z_smooth['3stddev'] = np.where(abs(df_z_smooth['z']) > 300, 1, 0)
- data_z = df_z_smooth['z'].add_suffix('_z').to_dict()
-
- # aggregate to chart level if specified
- if self.mode == 'per_chart':
- df_z_smooth['chart'] = ['.'.join(x[0:2]) + '_z' for x in df_z_smooth.index.str.split('.').to_list()]
- if self.per_chart_agg == 'absmax':
- data_z = \
- list(df_z_smooth.groupby('chart').agg({'z': lambda x: max(x, key=abs)})['z'].to_dict().values())[0]
- else:
- data_z = list(df_z_smooth.groupby('chart').agg({'z': [self.per_chart_agg]})['z'].to_dict().values())[0]
-
- data_3stddev = {}
- for k in data_z:
- data_3stddev[k.replace('_z', '')] = 1 if abs(data_z[k]) > 300 else 0
-
- return data_z, data_3stddev
-
- def get_data(self):
-
- if self.runs_counter <= self.burn_in or self.runs_counter % self.train_every_n == 0:
- self.train_model()
-
- data_z, data_3stddev = self.create_data(
- get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.').transpose())
- data = {**data_z, **data_3stddev}
-
- self.validate_charts('z', data_z, divisor=100)
- self.validate_charts('3stddev', data_3stddev)
-
- return data
diff --git a/src/collectors/python.d.plugin/zscores/zscores.conf b/src/collectors/python.d.plugin/zscores/zscores.conf
deleted file mode 100644
index 07d62ebe6..000000000
--- a/src/collectors/python.d.plugin/zscores/zscores.conf
+++ /dev/null
@@ -1,108 +0,0 @@
-# netdata python.d.plugin configuration for example
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, example also supports the following:
-#
-# - none
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
- name: 'local'
-
- # what host to pull data from
- host: '127.0.0.1:19999'
-
- # what charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
- charts_regex: 'system\..*'
-
- # Charts to exclude, useful if you would like to exclude some specific charts.
- # Note: should be a ',' separated string like 'chart.name,chart.name'.
- charts_to_exclude: 'system.uptime'
-
- # length of time to base calculations off for mean and stddev
- train_secs: 14400 # use last 4 hours to work out the mean and stddev for the zscore
-
- # offset preceding latest data to ignore when calculating mean and stddev
- offset_secs: 300 # ignore last 5 minutes of data when calculating the mean and stddev
-
- # recalculate the mean and stddev every n steps of the collector
- train_every_n: 900 # recalculate mean and stddev every 15 minutes
-
- # smooth the z score by averaging it over last n values
- z_smooth_n: 15 # take a rolling average of the last 15 zscore values to reduce sensitivity to temporary 'spikes'
-
- # cap absolute value of zscore (before smoothing) for better stability
- z_clip: 10 # cap each zscore at 10 so as to avoid really large individual zscores swamping any rolling average
-
- # set z_abs: 'true' to make all zscores be absolute values only.
- z_abs: 'true'
-
- # burn in period in which to initially calculate mean and stddev on every step
- burn_in: 2 # on startup of the collector continually update the mean and stddev in case any gaps or initial calculations fail to return
-
- # mode can be to get a zscore 'per_dim' or 'per_chart'
- mode: 'per_chart' # 'per_chart' means individual dimension level smoothed zscores will be aggregated to one zscore per chart per time step
-
- # per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'
- per_chart_agg: 'mean' # 'absmax' will take the max absolute value across all dimensions but will maintain the sign. 'mean' will just average.
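
As an editorial aside (not part of the shipped configuration), the small sketch below illustrates the difference between the two `per_chart_agg` choices described in the comment above, using made-up smoothed z-scores for three dimensions of one chart:

```python
# Hypothetical smoothed z-scores for the dimensions of a single chart.
dims = {"system.cpu.user": -2.5, "system.cpu.system": 1.0, "system.cpu.idle": 0.5}

mean_agg = sum(dims.values()) / len(dims)   # 'mean'   -> about -0.33
absmax_agg = max(dims.values(), key=abs)    # 'absmax' -> -2.5 (largest magnitude, sign kept)

print(round(mean_agg, 2), absmax_agg)
```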
diff --git a/src/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md b/src/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md
index 1e0db44e4..04e1e4d39 100644
--- a/src/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md
+++ b/src/collectors/slabinfo.plugin/integrations/linux_kernel_slab_allocator_statistics.md
@@ -106,8 +106,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/slabinfo.plugin/slabinfo.c b/src/collectors/slabinfo.plugin/slabinfo.c
index 216f31ac6..98adc1513 100644
--- a/src/collectors/slabinfo.plugin/slabinfo.c
+++ b/src/collectors/slabinfo.plugin/slabinfo.c
@@ -167,7 +167,7 @@ struct slabinfo *read_file_slabinfo() {
slabdebug(" Read %lu lines from procfile", (unsigned long)lines);
for(l = 2; l < lines; l++) {
if (unlikely(procfile_linewords(ff, l) < 14)) {
- slabdebug(" Line %zu has only %zu words, skipping", l, procfile_linewords(ff,l));
+ slabdebug(" Line %zu has only %zu words, skipping", l, (size_t)procfile_linewords(ff,l));
continue;
}
@@ -318,6 +318,12 @@ unsigned int do_slab_stats(int update_every) {
}
printf("END\n");
+ fprintf(stdout, "\n");
+ fflush(stdout);
+ if (ferror(stdout) && errno == EPIPE) {
+ netdata_log_error("error writing to stdout: EPIPE. Exiting...");
+ return loops;
+ }
loops++;
@@ -339,7 +345,6 @@ void usage(void) {
}
int main(int argc, char **argv) {
- clocks_init();
nd_log_initialize_for_external_plugins("slabinfo.plugin");
program_name = argv[0];
diff --git a/src/collectors/statsd.plugin/README.md b/src/collectors/statsd.plugin/README.md
index 302829242..b93d6c798 100644
--- a/src/collectors/statsd.plugin/README.md
+++ b/src/collectors/statsd.plugin/README.md
@@ -1,12 +1,3 @@
-<!--
-title: "StatsD"
-description: "The Netdata Agent is a fully-featured StatsD server that collects metrics from any custom application and visualizes them in real-time."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/statsd.plugin/README.md"
-sidebar_label: "StatsD"
-learn_status: "Published"
-learn_rel_path: "Integrations/Monitor/Anything"
--->
-
# StatsD
[StatsD](https://github.com/statsd/statsd) is a system to collect data from any application. Applications send metrics to it,
@@ -170,11 +161,11 @@ You can find the configuration at `/etc/netdata/netdata.conf`:
[statsd]
# enabled = yes
# decimal detail = 1000
- # update every (flushInterval) = 1
+ # update every (flushInterval) = 1s
# udp messages to process at once = 10
# create private charts for metrics matching = *
# max private charts hard limit = 1000
- # cleanup obsolete charts after secs = 0
+ # cleanup obsolete charts after = 0
# private charts memory mode = save
# private charts history = 3996
# histograms and timers percentile (percentThreshold) = 95.00000
@@ -204,7 +195,7 @@ You can find the configuration at `/etc/netdata/netdata.conf`:
is a space separated list of IPs and ports to listen to. The format is `PROTOCOL:IP:PORT` - if `PORT` is omitted, the `default port` will be used. If `IP` is IPv6, it needs to be enclosed in `[]`. `IP` can also be `*` (to listen on all IPs) or even a hostname.
-- `update every (flushInterval) = 1` seconds, controls the frequency StatsD will push the collected metrics to Netdata charts.
+- `update every (flushInterval) = 1s` controls the frequency StatsD will push the collected metrics to Netdata charts.
- `decimal detail = 1000` controls the number of fractional digits in gauges and histograms. Netdata collects metrics using signed 64-bit integers and their fractional detail is controlled using multipliers and divisors. This setting is used to multiply all collected values to convert them to integers and is also used as the divisor, so that the final data will be a floating point number with this fractional detail (1000 = X.0 - X.999, 10000 = X.0 - X.9999, etc).
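
As a rough editorial illustration of the multiplier/divisor mechanics described above (plain arithmetic, not Netdata code):

```python
decimal_detail = 1000                          # keep 3 fractional digits (X.000 - X.999)

collected = 12.3456                            # value received from the application
stored = round(collected * decimal_detail)     # 12346, held internally as a signed integer
displayed = stored / decimal_detail            # 12.346, the value rendered on the chart

print(stored, displayed)
```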
@@ -238,7 +229,7 @@ The default behavior is to use the same settings as the rest of the Netdata Agen
For optimization reasons, Netdata imposes a hard limit on private metric charts. The limit is set via the `max private charts hard limit` setting (which defaults to 1000 charts). Metrics above this hard limit are still collected, but they can only be used in synthetic charts (once a metric is added to chart, it will be sent to backend servers too).
-If you have many ephemeral metrics collected (i.e. that you collect values for a certain amount of time), you can set the configuration option `set charts as obsolete after secs`. Setting a value in seconds here, means that Netdata will mark those metrics (and their private charts) as obsolete after the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after secs`. Setting `set charts as obsolete after secs` to 0 (which is also the default value) will disable this functionality.
+If you have many ephemeral metrics collected (i.e. metrics you only collect values for during a limited amount of time), you can set the configuration option `set charts as obsolete after`. Setting a value in seconds here means that Netdata will mark those metrics (and their private charts) as obsolete after the specified time has passed since the last sent metric value. Those charts will later be deleted according to the setting in `cleanup obsolete charts after`. Setting `set charts as obsolete after` to 0 (which is also the default value) will disable this functionality.
Example private charts (automatically generated without any configuration):
@@ -785,7 +776,7 @@ visualize all the available operations.
Start by creating a new configuration file under the `statsd.d/` folder in the
[Netdata config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-Use [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-netdataconf)
+Use [`edit-config`](/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config)
to create a new file called `k6.conf`.
```bash=
@@ -794,7 +785,7 @@ sudo ./edit-config statsd.d/k6.conf
Copy the following configuration into your file as a starting point.
-```conf
+```text
[app]
name = k6
metrics = k6*
@@ -973,7 +964,7 @@ Note that Netdata will report the rate for metrics and counters, even if k6 or a
sends an _absolute_ number. For example, k6 sends absolute HTTP requests with `http_reqs`,
but Netdata visualizes that in `requests/second`.
-To enable this StatsD configuration, [restart Netdata](/packaging/installer/README.md#maintaining-a-netdata-agent-installation).
+To enable this StatsD configuration, [restart Netdata](/docs/netdata-agent/start-stop-restart.md).
### Final touches
diff --git a/src/collectors/statsd.plugin/asterisk.md b/src/collectors/statsd.plugin/asterisk.md
index 302fb932f..d7cb588e5 100644
--- a/src/collectors/statsd.plugin/asterisk.md
+++ b/src/collectors/statsd.plugin/asterisk.md
@@ -1,11 +1,3 @@
-<!--
-title: "Asterisk monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/statsd.plugin/asterisk.md"
-sidebar_label: "Asterisk"
-learn_status: "Published"
-learn_rel_path: "Integrations/Monitor/VoIP"
--->
-
# Asterisk collector
Monitors [Asterisk](https://www.asterisk.org/) dialplan application's statistics.
diff --git a/src/collectors/statsd.plugin/k6.md b/src/collectors/statsd.plugin/k6.md
index b657ff1a9..76939d3bb 100644
--- a/src/collectors/statsd.plugin/k6.md
+++ b/src/collectors/statsd.plugin/k6.md
@@ -1,11 +1,3 @@
-<!--
-title: "K6 load test monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/statsd.plugin/k6.md"
-sidebar_label: "K6 Load Testing"
-learn_status: "Published"
-learn_rel_path: "Integrations/Monitor/apps"
--->
-
# K6 load test collector
Monitors the impact of load testing experiments performed with [K6](https://k6.io/).
diff --git a/src/collectors/statsd.plugin/statsd.c b/src/collectors/statsd.plugin/statsd.c
index f83818059..11a6ac968 100644
--- a/src/collectors/statsd.plugin/statsd.c
+++ b/src/collectors/statsd.plugin/statsd.c
@@ -1283,7 +1283,7 @@ static int statsd_readfile(const char *filename, STATSD_APP *app, STATSD_APP_CHA
// find the directory name from the file we already read
char *filename2 = strdupz(filename); // copy filename, since dirname() will change it
char *dir = dirname(filename2); // find the directory part of the filename
- tmp = strdupz_path_subpath(dir, s); // compose the new filename to read;
+ tmp = filename_from_path_entry_strdupz(dir, s); // compose the new filename to read;
freez(filename2); // free the filename we copied
}
statsd_readfile(tmp, app, chart, dict);
@@ -2491,10 +2491,11 @@ void *statsd_main(void *ptr) {
statsd.enabled = config_get_boolean(CONFIG_SECTION_PLUGINS, "statsd", statsd.enabled);
statsd.update_every = default_rrd_update_every;
- statsd.update_every = (int)config_get_number(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
+ statsd.update_every = (int)config_get_duration_seconds(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
if(statsd.update_every < default_rrd_update_every) {
collector_error("STATSD: minimum flush interval %d given, but the minimum is the update every of netdata. Using %d", statsd.update_every, default_rrd_update_every);
statsd.update_every = default_rrd_update_every;
+ config_set_duration_seconds(CONFIG_SECTION_STATSD, "update every (flushInterval)", statsd.update_every);
}
#ifdef HAVE_RECVMMSG
@@ -2504,13 +2505,26 @@ void *statsd_main(void *ptr) {
statsd.charts_for = simple_pattern_create(
config_get(CONFIG_SECTION_STATSD, "create private charts for metrics matching", "*"), NULL,
SIMPLE_PATTERN_EXACT, true);
- statsd.max_private_charts_hard = (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard);
- statsd.set_obsolete_after = (size_t)config_get_number(CONFIG_SECTION_STATSD, "set charts as obsolete after secs", (long long)statsd.set_obsolete_after);
- statsd.decimal_detail = (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail);
- statsd.tcp_idle_timeout = (size_t) config_get_number(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after seconds", (long long int)statsd.tcp_idle_timeout);
- statsd.private_charts_hidden = (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden);
- statsd.histogram_percentile = (double)config_get_float(CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile);
+ statsd.max_private_charts_hard =
+ (size_t)config_get_number(CONFIG_SECTION_STATSD, "max private charts hard limit", (long long)statsd.max_private_charts_hard);
+
+ statsd.set_obsolete_after =
+ (size_t)config_get_duration_seconds(CONFIG_SECTION_STATSD, "set charts as obsolete after", (long long)statsd.set_obsolete_after);
+
+ statsd.decimal_detail =
+ (collected_number)config_get_number(CONFIG_SECTION_STATSD, "decimal detail", (long long int)statsd.decimal_detail);
+
+ statsd.tcp_idle_timeout =
+ (size_t) config_get_duration_seconds(CONFIG_SECTION_STATSD, "disconnect idle tcp clients after", (long long int)statsd.tcp_idle_timeout);
+
+ statsd.private_charts_hidden =
+ (unsigned int)config_get_boolean(CONFIG_SECTION_STATSD, "private charts hidden", statsd.private_charts_hidden);
+
+ statsd.histogram_percentile =
+ (double)config_get_double(
+ CONFIG_SECTION_STATSD, "histograms and timers percentile (percentThreshold)", statsd.histogram_percentile);
+
if(isless(statsd.histogram_percentile, 0) || isgreater(statsd.histogram_percentile, 100)) {
collector_error("STATSD: invalid histograms and timers percentile %0.5f given", statsd.histogram_percentile);
statsd.histogram_percentile = 95.0;
@@ -2521,7 +2535,8 @@ void *statsd_main(void *ptr) {
statsd.histogram_percentile_str = strdupz(buffer);
}
- statsd.dictionary_max_unique = config_get_number(CONFIG_SECTION_STATSD, "dictionaries max unique dimensions", statsd.dictionary_max_unique);
+ statsd.dictionary_max_unique =
+ config_get_number(CONFIG_SECTION_STATSD, "dictionaries max unique dimensions", statsd.dictionary_max_unique);
if(config_get_boolean(CONFIG_SECTION_STATSD, "add dimension for number of events received", 0)) {
statsd.gauges.default_options |= STATSD_METRIC_OPTION_CHART_DIMENSION_COUNT;
@@ -2803,12 +2818,11 @@ void *statsd_main(void *ptr) {
// ----------------------------------------------------------------------------------------------------------------
// statsd thread to turn metrics into charts
- usec_t step = statsd.update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, statsd.update_every * USEC_PER_SEC);
while(service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- heartbeat_next(&hb, step);
+ heartbeat_next(&hb);
worker_is_busy(WORKER_STATSD_FLUSH_GAUGES);
statsd_flush_index_metrics(&statsd.gauges, statsd_flush_gauge);
diff --git a/src/collectors/systemd-journal.plugin/README.md b/src/collectors/systemd-journal.plugin/README.md
index 9f73ba30e..74eba78de 100644
--- a/src/collectors/systemd-journal.plugin/README.md
+++ b/src/collectors/systemd-journal.plugin/README.md
@@ -1,4 +1,3 @@
-
# `systemd` journal plugin
[KEY FEATURES](#key-features) | [JOURNAL SOURCES](#journal-sources) | [JOURNAL FIELDS](#journal-fields) |
@@ -40,8 +39,8 @@ For more information check [this discussion](https://github.com/netdata/netdata/
The following are limitations related to the availability of the plugin:
-- Netdata versions prior to 1.44 shipped in a docker container do not include this plugin.
- The problem is that `libsystemd` is not available in Alpine Linux (there is a `libsystemd`, but it is a dummy that
+- Netdata versions prior to 1.44 shipped in a docker container do not include this plugin.
+ The problem is that `libsystemd` is not available in Alpine Linux (there is a `libsystemd`, but it is a dummy that
returns failure on all calls). Starting with Netdata version 1.44, Netdata containers use a Debian base image
making this plugin available when Netdata is running in a container.
- For the same reason (lack of `systemd` support for Alpine Linux), the plugin is not available on `static` builds of
@@ -321,7 +320,7 @@ algorithm to allow it respond promptly. It works like this:
6. In systemd versions 254 or later, the plugin fetches the unique sequence number of each log entry and calculates
   the percentage of the file matched by the query, versus the total number of log entries in the journal file.
7. In systemd versions prior to 254, the plugin estimates the number of entries the journal file contributes to the
- query, using the amount of log entries matched it vs. the total duration the log file has entries for.
+   query, using the number of log entries it matched vs. the total duration the log file has entries for.
The above allow the plugin to respond promptly even when the number of log entries in the journal files is several
dozens millions, while providing accurate estimations of the log entries over time at the histogram and enough counters
diff --git a/src/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md b/src/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md
index cbed1e81e..ef57e1d24 100644
--- a/src/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md
+++ b/src/collectors/systemd-journal.plugin/active_journal_centralization_guide_no_encryption.md
@@ -47,7 +47,7 @@ sudo systemctl enable --now systemd-journal-gatewayd.socket
To use it, open your web browser and navigate to:
-```
+```text
http://server.ip:19531/browse
```
diff --git a/src/collectors/systemd-journal.plugin/forward_secure_sealing.md b/src/collectors/systemd-journal.plugin/forward_secure_sealing.md
index b41570d68..3ab7c8d08 100644
--- a/src/collectors/systemd-journal.plugin/forward_secure_sealing.md
+++ b/src/collectors/systemd-journal.plugin/forward_secure_sealing.md
@@ -5,12 +5,14 @@ Given that attackers often try to hide their actions by modifying or deleting lo
FSS provides administrators with a mechanism to identify any such unauthorized alterations.
## Importance
+
Logs are a crucial component of system monitoring and auditing. Ensuring their integrity means administrators can trust
the data, detect potential breaches, and trace actions back to their origins. Traditional methods to maintain this
integrity involve writing logs to external systems or printing them out. While these methods are effective, they are
not foolproof. FSS offers a more streamlined approach, allowing for log verification directly on the local system.
## How FSS Works
+
FSS operates by "sealing" binary logs at regular intervals. This seal is a cryptographic operation, ensuring that any
tampering with the logs prior to the sealing can be detected. If an attacker modifies logs before they are sealed,
these changes become a permanent part of the sealed record, highlighting any malicious activity.
@@ -29,6 +31,7 @@ administrators to verify older seals. If logs are tampered with, verification wi
breach.
## Enabling FSS
+
To enable FSS, use the following command:
```bash
@@ -43,6 +46,7 @@ journalctl --setup-keys --interval=10s
```
## Verifying Journals
+
After enabling FSS, you can verify the integrity of your logs using the verification key:
```bash
@@ -52,6 +56,7 @@ journalctl --verify
If any discrepancies are found, you'll be alerted, indicating potential tampering.
## Disabling FSS
+
Should you wish to disable FSS:
**Delete the Sealing Key**: This stops new log entries from being sealed.
@@ -66,7 +71,6 @@ journalctl --rotate
journalctl --vacuum-time=1s
```
-
**Adjust Systemd Configuration (Optional)**: If you've made changes to facilitate FSS in `/etc/systemd/journald.conf`,
consider reverting or adjusting those. Restart the systemd-journald service afterward:
@@ -75,6 +79,7 @@ systemctl restart systemd-journald
```
## Conclusion
+
FSS is a significant advancement in maintaining log integrity. While not a replacement for all traditional integrity
methods, it offers a valuable tool in the battle against unauthorized log tampering. By integrating FSS into your log
management strategy, you ensure a more transparent, reliable, and tamper-evident logging system.
diff --git a/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md b/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md
index b70c22033..a89379e4b 100644
--- a/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md
+++ b/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_no_encryption.md
@@ -74,7 +74,7 @@ sudo apt-get install systemd-journal-remote
Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so:
-```conf
+```text
[Upload]
URL=http://centralization.server.ip:19532
```
@@ -87,7 +87,7 @@ sudo systemctl edit systemd-journal-upload
At the top, add:
-```conf
+```text
[Service]
Restart=always
```
diff --git a/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md b/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md
index f8b9a62f0..f4038e812 100644
--- a/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md
+++ b/src/collectors/systemd-journal.plugin/passive_journal_centralization_guide_self_signed_certs.md
@@ -46,9 +46,9 @@ sudo ./systemd-journal-self-signed-certs.sh "server1" "DNS:hostname1" "IP:10.0.0
Where:
- - `server1` is the canonical name of the server. On newer systemd version, this name will be used by `systemd-journal-remote` and Netdata when you view the logs on the dashboard.
- - `DNS:hostname1` is a DNS name that the server is reachable at. Add `"DNS:xyz"` multiple times to define multiple DNS names for the server.
- - `IP:10.0.0.1` is an IP that the server is reachable at. Add `"IP:xyz"` multiple times to define multiple IPs for the server.
+- `server1` is the canonical name of the server. On newer systemd versions, this name will be used by `systemd-journal-remote` and Netdata when you view the logs on the dashboard.
+- `DNS:hostname1` is a DNS name that the server is reachable at. Add `"DNS:xyz"` multiple times to define multiple DNS names for the server.
+- `IP:10.0.0.1` is an IP that the server is reachable at. Add `"IP:xyz"` multiple times to define multiple IPs for the server.
Repeat this process to create the certificates for all your servers. You can add servers as required, at any time in the future.
@@ -150,7 +150,7 @@ sudo apt-get install systemd-journal-remote
Edit `/etc/systemd/journal-upload.conf` and set the IP address and the port of the server, like so:
-```conf
+```text
[Upload]
URL=https://centralization.server.ip:19532
```
@@ -165,7 +165,7 @@ sudo systemctl edit systemd-journal-upload.service
At the top, add:
-```conf
+```text
[Service]
Restart=always
```
@@ -198,7 +198,6 @@ Here it is in action, in Netdata:
![2023-10-18 16-23-05](https://github.com/netdata/netdata/assets/2662304/83bec232-4770-455b-8f1c-46b5de5f93a2)
-
## Verify it works
To verify the central server is receiving logs, run this on the central server:
diff --git a/src/collectors/systemd-journal.plugin/systemd-internals.h b/src/collectors/systemd-journal.plugin/systemd-internals.h
index 31acb2f20..a2d13de64 100644
--- a/src/collectors/systemd-journal.plugin/systemd-internals.h
+++ b/src/collectors/systemd-journal.plugin/systemd-internals.h
@@ -153,4 +153,6 @@ static inline bool parse_journal_field(const char *data, size_t data_length, con
void systemd_journal_dyncfg_init(struct functions_evloop_globals *wg);
+bool is_journal_file(const char *filename, ssize_t len, const char **start_of_extension);
+
#endif //NETDATA_COLLECTORS_SYSTEMD_INTERNALS_H
diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-annotations.c b/src/collectors/systemd-journal.plugin/systemd-journal-annotations.c
index c5b708714..c9ade0a33 100644
--- a/src/collectors/systemd-journal.plugin/systemd-journal-annotations.c
+++ b/src/collectors/systemd-journal.plugin/systemd-journal-annotations.c
@@ -3,18 +3,6 @@
#include "systemd-internals.h"
// ----------------------------------------------------------------------------
-#include "libnetdata/maps/system-users.h"
-#include "libnetdata/maps/system-groups.h"
-
-static struct {
- USERNAMES_CACHE *uc;
- GROUPNAMES_CACHE *gc;
-} systemd_annotations_globals = {
- .uc = NULL,
- .gc = NULL,
-};
-
-// ----------------------------------------------------------------------------
const char *errno_map[] = {
[1] = "1 (EPERM)", // "Operation not permitted",
@@ -369,9 +357,9 @@ void netdata_systemd_journal_transform_uid(FACETS *facets __maybe_unused, BUFFER
const char *v = buffer_tostring(wb);
if(*v && isdigit(*v)) {
uid_t uid = str2i(buffer_tostring(wb));
- STRING *u = system_usernames_cache_lookup_uid(systemd_annotations_globals.uc, uid);
- buffer_contents_replace(wb, string2str(u), string_strlen(u));
- string_freez(u);
+ CACHED_USERNAME cu = cached_username_get_by_uid(uid);
+ buffer_contents_replace(wb, string2str(cu.username), string_strlen(cu.username));
+ cached_username_release(cu);
}
}
@@ -382,9 +370,9 @@ void netdata_systemd_journal_transform_gid(FACETS *facets __maybe_unused, BUFFER
const char *v = buffer_tostring(wb);
if(*v && isdigit(*v)) {
gid_t gid = str2i(buffer_tostring(wb));
- STRING *g = system_groupnames_cache_lookup_gid(systemd_annotations_globals.gc, gid);
- buffer_contents_replace(wb, string2str(g), string_strlen(g));
- string_freez(g);
+ CACHED_GROUPNAME cg = cached_groupname_get_by_gid(gid);
+ buffer_contents_replace(wb, string2str(cg.groupname), string_strlen(cg.groupname));
+ cached_groupname_release(cg);
}
}
@@ -650,8 +638,10 @@ void netdata_systemd_journal_transform_message_id(FACETS *facets __maybe_unused,
// ----------------------------------------------------------------------------
void netdata_systemd_journal_annotations_init(void) {
- systemd_annotations_globals.uc = system_usernames_cache_init();
- systemd_annotations_globals.gc = system_groupnames_cache_init();
+ cached_usernames_init();
+ cached_groupnames_init();
+ update_cached_host_users();
+ update_cached_host_groups();
netdata_systemd_journal_message_ids_init();
}
diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c b/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c
index 469f9d2cf..d8098bd6c 100644
--- a/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c
+++ b/src/collectors/systemd-journal.plugin/systemd-journal-dyncfg.c
@@ -58,6 +58,10 @@ static int systemd_journal_directories_dyncfg_update(BUFFER *result, BUFFER *pay
struct json_object *journalDirectories;
json_object_object_get_ex(jobj, JOURNAL_DIRECTORIES_JSON_NODE, &journalDirectories);
+ if (json_object_get_type(journalDirectories) != json_type_array)
+ return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST,
+ "member " JOURNAL_DIRECTORIES_JSON_NODE " is not an array");
+
size_t n_directories = json_object_array_length(journalDirectories);
if(n_directories > MAX_JOURNAL_DIRECTORIES)
return dyncfg_default_response(result, HTTP_RESP_BAD_REQUEST, "too many directories configured");
diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-files.c b/src/collectors/systemd-journal.plugin/systemd-journal-files.c
index a05cd1c5c..ea0511f7a 100644
--- a/src/collectors/systemd-journal.plugin/systemd-journal-files.c
+++ b/src/collectors/systemd-journal.plugin/systemd-journal-files.c
@@ -285,8 +285,8 @@ void journal_file_update_header(const char *filename, struct journal_file *jf) {
if(dash_seqnum) {
const char *dash_first_msg_ut = strchr(dash_seqnum + 1, '-');
if(dash_first_msg_ut) {
- const char *dot_journal = strstr(dash_first_msg_ut + 1, ".journal");
- if(dot_journal) {
+ const char *dot_journal = NULL;
+ if(is_journal_file(filename, -1, &dot_journal) && dot_journal && dot_journal > dash_first_msg_ut) {
if(dash_seqnum - at - 1 == 32 &&
dash_first_msg_ut - dash_seqnum - 1 == 16 &&
dot_journal - dash_first_msg_ut - 1 == 16) {
@@ -369,8 +369,7 @@ static STRING *string_strdupz_source(const char *s, const char *e, size_t max_le
buf[max_len - 1] = '\0';
for(size_t i = 0; buf[i] ;i++)
- if(!isalnum(buf[i]) && buf[i] != '-' && buf[i] != '.' && buf[i] != ':')
- buf[i] = '_';
+ if(!is_netdata_api_valid_character(buf[i])) buf[i] = '_';
return string_strdupz(buf);
}
@@ -393,7 +392,7 @@ static void files_registry_insert_cb(const DICTIONARY_ITEM *item, void *value, v
char *e = strchr(s, '@');
if(!e)
- e = strstr(s, ".journal");
+ is_journal_file(s, -1, (const char **)&e);
if(e) {
const char *d = s;
@@ -475,19 +474,6 @@ struct journal_file_source {
uint64_t size;
};
-static void human_readable_size_ib(uint64_t size, char *dst, size_t dst_len) {
- if(size > 1024ULL * 1024 * 1024 * 1024)
- snprintfz(dst, dst_len, "%0.2f TiB", (double)size / 1024.0 / 1024.0 / 1024.0 / 1024.0);
- else if(size > 1024ULL * 1024 * 1024)
- snprintfz(dst, dst_len, "%0.2f GiB", (double)size / 1024.0 / 1024.0 / 1024.0);
- else if(size > 1024ULL * 1024)
- snprintfz(dst, dst_len, "%0.2f MiB", (double)size / 1024.0 / 1024.0);
- else if(size > 1024ULL)
- snprintfz(dst, dst_len, "%0.2f KiB", (double)size / 1024.0);
- else
- snprintfz(dst, dst_len, "%"PRIu64" B", size);
-}
-
#define print_duration(dst, dst_len, pos, remaining, duration, one, many, printed) do { \
if((remaining) > (duration)) { \
uint64_t _count = (remaining) / (duration); \
@@ -498,22 +484,6 @@ static void human_readable_size_ib(uint64_t size, char *dst, size_t dst_len) {
} \
} while(0)
-static void human_readable_duration_s(time_t duration_s, char *dst, size_t dst_len) {
- if(duration_s < 0)
- duration_s = -duration_s;
-
- size_t pos = 0;
- dst[0] = 0 ;
-
- bool printed = false;
- print_duration(dst, dst_len, pos, duration_s, 86400 * 365, "year", "years", printed);
- print_duration(dst, dst_len, pos, duration_s, 86400 * 30, "month", "months", printed);
- print_duration(dst, dst_len, pos, duration_s, 86400 * 1, "day", "days", printed);
- print_duration(dst, dst_len, pos, duration_s, 3600 * 1, "hour", "hours", printed);
- print_duration(dst, dst_len, pos, duration_s, 60 * 1, "min", "mins", printed);
- print_duration(dst, dst_len, pos, duration_s, 1, "sec", "secs", printed);
-}
-
static int journal_file_to_json_array_cb(const DICTIONARY_ITEM *item, void *entry, void *data) {
struct journal_file_source *jfs = entry;
BUFFER *wb = data;
@@ -522,12 +492,12 @@ static int journal_file_to_json_array_cb(const DICTIONARY_ITEM *item, void *entr
buffer_json_add_array_item_object(wb);
{
- char size_for_humans[100];
- human_readable_size_ib(jfs->size, size_for_humans, sizeof(size_for_humans));
+ char size_for_humans[128];
+ size_snprintf(size_for_humans, sizeof(size_for_humans), jfs->size, "B", false);
- char duration_for_humans[1024];
- human_readable_duration_s((time_t)((jfs->last_ut - jfs->first_ut) / USEC_PER_SEC),
- duration_for_humans, sizeof(duration_for_humans));
+ char duration_for_humans[128];
+ duration_snprintf(duration_for_humans, sizeof(duration_for_humans),
+ (time_t)((jfs->last_ut - jfs->first_ut) / USEC_PER_SEC), "s", true);
char info[1024];
snprintfz(info, sizeof(info), "%zu files, with a total size of %s, covering %s",
@@ -602,10 +572,39 @@ static void files_registry_delete_cb(const DICTIONARY_ITEM *item, void *value, v
string_freez(jf->source);
}
-void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth) {
- static const char *ext = ".journal";
- static const ssize_t ext_len = sizeof(".journal") - 1;
+#define EXT_DOT_JOURNAL ".journal"
+#define EXT_DOT_JOURNAL_TILDA ".journal~"
+
+static struct {
+ const char *ext;
+ ssize_t len;
+} valid_journal_extension[] = {
+ { .ext = EXT_DOT_JOURNAL, .len = sizeof(EXT_DOT_JOURNAL) - 1 },
+ { .ext = EXT_DOT_JOURNAL_TILDA, .len = sizeof(EXT_DOT_JOURNAL_TILDA) - 1 },
+};
+
+bool is_journal_file(const char *filename, ssize_t len, const char **start_of_extension) {
+ if(len < 0)
+ len = (ssize_t)strlen(filename);
+ for(size_t i = 0; i < _countof(valid_journal_extension) ;i++) {
+ const char *ext = valid_journal_extension[i].ext;
+ ssize_t elen = valid_journal_extension[i].len;
+
+ if(len > elen && strcmp(filename + len - elen, ext) == 0) {
+ if(start_of_extension)
+ *start_of_extension = filename + len - elen;
+ return true;
+ }
+ }
+
+ if(start_of_extension)
+ *start_of_extension = NULL;
+
+ return false;
+}
+
+void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, const char *dirname, int depth) {
if (depth > VAR_LOG_JOURNAL_MAX_DEPTH)
return;
@@ -635,7 +634,7 @@ void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, con
if (entry->d_type == DT_DIR) {
journal_directory_scan_recursively(files, dirs, full_path, depth++);
}
- else if (entry->d_type == DT_REG && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) {
+ else if (entry->d_type == DT_REG && is_journal_file(full_path, len, NULL)) {
if(files)
dictionary_set(files, full_path, NULL, 0);
@@ -653,7 +652,7 @@ void journal_directory_scan_recursively(DICTIONARY *files, DICTIONARY *dirs, con
journal_directory_scan_recursively(files, dirs, resolved_path, depth++);
}
}
- else if(S_ISREG(info.st_mode) && len > ext_len && strcmp(full_path + len - ext_len, ext) == 0) {
+ else if(S_ISREG(info.st_mode) && is_journal_file(full_path, len, NULL)) {
if(files)
dictionary_set(files, full_path, NULL, 0);
@@ -756,6 +755,7 @@ void journal_files_registry_update(void) {
dictionary_del(journal_files_registry, jf_dfe.name);
}
dfe_done(jf);
+ dictionary_garbage_collect(journal_files_registry);
journal_files_scans++;
spinlock_unlock(&spinlock);
diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-sampling.h b/src/collectors/systemd-journal.plugin/systemd-journal-sampling.h
new file mode 100644
index 000000000..0e1fed2d6
--- /dev/null
+++ b/src/collectors/systemd-journal.plugin/systemd-journal-sampling.h
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SYSTEMD_JOURNAL_SAMPLING_H
+#define NETDATA_SYSTEMD_JOURNAL_SAMPLING_H
+
+// ----------------------------------------------------------------------------
+// sampling support
+
+static inline void sampling_query_init(LOGS_QUERY_STATUS *lqs, FACETS *facets) {
+ if(!lqs->rq.sampling)
+ return;
+
+ if(!lqs->rq.slice) {
+ // the user is doing a full data query
+ // disable sampling
+ lqs->rq.sampling = 0;
+ return;
+ }
+
+ if(lqs->rq.data_only) {
+ // the user is doing a data query
+ // disable sampling
+ lqs->rq.sampling = 0;
+ return;
+ }
+
+ if(!lqs->c.files_matched) {
+ // no files have been matched
+ // disable sampling
+ lqs->rq.sampling = 0;
+ return;
+ }
+
+ lqs->c.samples.slots = facets_histogram_slots(facets);
+ if(lqs->c.samples.slots < 2)
+ lqs->c.samples.slots = 2;
+ if(lqs->c.samples.slots > SYSTEMD_JOURNAL_SAMPLING_SLOTS)
+ lqs->c.samples.slots = SYSTEMD_JOURNAL_SAMPLING_SLOTS;
+
+ if(!lqs->rq.after_ut || !lqs->rq.before_ut || lqs->rq.after_ut >= lqs->rq.before_ut) {
+ // we don't have enough information for sampling
+ lqs->rq.sampling = 0;
+ return;
+ }
+
+ usec_t delta = lqs->rq.before_ut - lqs->rq.after_ut;
+ usec_t step = delta / facets_histogram_slots(facets) - 1;
+ if(step < 1) step = 1;
+
+ lqs->c.samples_per_time_slot.start_ut = lqs->rq.after_ut;
+ lqs->c.samples_per_time_slot.end_ut = lqs->rq.before_ut;
+ lqs->c.samples_per_time_slot.step_ut = step;
+
+ // the minimum number of rows to enable sampling
+ lqs->c.samples.enable_after_samples = lqs->rq.sampling / 2;
+
+ size_t files_matched = lqs->c.files_matched;
+ if(!files_matched)
+ files_matched = 1;
+
+ // the minimum number of rows per file to enable sampling
+ lqs->c.samples_per_file.enable_after_samples = (lqs->rq.sampling / 4) / files_matched;
+ if(lqs->c.samples_per_file.enable_after_samples < lqs->rq.entries)
+ lqs->c.samples_per_file.enable_after_samples = lqs->rq.entries;
+
+ // the minimum number of rows per time slot to enable sampling
+ lqs->c.samples_per_time_slot.enable_after_samples = (lqs->rq.sampling / 4) / lqs->c.samples.slots;
+ if(lqs->c.samples_per_time_slot.enable_after_samples < lqs->rq.entries)
+ lqs->c.samples_per_time_slot.enable_after_samples = lqs->rq.entries;
+}
+
+static inline void sampling_file_init(LOGS_QUERY_STATUS *lqs, struct journal_file *jf __maybe_unused) {
+ lqs->c.samples_per_file.sampled = 0;
+ lqs->c.samples_per_file.unsampled = 0;
+ lqs->c.samples_per_file.estimated = 0;
+ lqs->c.samples_per_file.every = 0;
+ lqs->c.samples_per_file.skipped = 0;
+ lqs->c.samples_per_file.recalibrate = 0;
+}
+
+static inline size_t sampling_file_lines_scanned_so_far(LOGS_QUERY_STATUS *lqs) {
+ size_t sampled = lqs->c.samples_per_file.sampled + lqs->c.samples_per_file.unsampled;
+ if(!sampled) sampled = 1;
+ return sampled;
+}
+
+static inline void sampling_running_file_query_overlapping_timeframe_ut(
+ LOGS_QUERY_STATUS *lqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction,
+ usec_t msg_ut, usec_t *after_ut, usec_t *before_ut) {
+
+ // find the overlap of the query and file timeframes
+ // taking into account the first message we encountered
+
+ usec_t oldest_ut, newest_ut;
+ if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) {
+ // the first message we know (oldest)
+ oldest_ut = lqs->c.query_file.first_msg_ut ? lqs->c.query_file.first_msg_ut : jf->msg_first_ut;
+ if(!oldest_ut) oldest_ut = lqs->c.query_file.start_ut;
+
+ if(jf->msg_last_ut)
+ newest_ut = MIN(lqs->c.query_file.stop_ut, jf->msg_last_ut);
+ else if(jf->file_last_modified_ut)
+ newest_ut = MIN(lqs->c.query_file.stop_ut, jf->file_last_modified_ut);
+ else
+ newest_ut = lqs->c.query_file.stop_ut;
+
+ if(msg_ut < oldest_ut)
+ oldest_ut = msg_ut - 1;
+ }
+ else /* BACKWARD */ {
+ // the latest message we know (newest)
+ newest_ut = lqs->c.query_file.first_msg_ut ? lqs->c.query_file.first_msg_ut : jf->msg_last_ut;
+ if(!newest_ut) newest_ut = lqs->c.query_file.start_ut;
+
+ if(jf->msg_first_ut)
+ oldest_ut = MAX(lqs->c.query_file.stop_ut, jf->msg_first_ut);
+ else
+ oldest_ut = lqs->c.query_file.stop_ut;
+
+ if(newest_ut < msg_ut)
+ newest_ut = msg_ut + 1;
+ }
+
+ *after_ut = oldest_ut;
+ *before_ut = newest_ut;
+}
+
+static inline double sampling_running_file_query_progress_by_time(
+ LOGS_QUERY_STATUS *lqs, struct journal_file *jf,
+ FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
+
+ usec_t after_ut, before_ut, elapsed_ut;
+ sampling_running_file_query_overlapping_timeframe_ut(lqs, jf, direction, msg_ut, &after_ut, &before_ut);
+
+ if(direction == FACETS_ANCHOR_DIRECTION_FORWARD)
+ elapsed_ut = msg_ut - after_ut;
+ else
+ elapsed_ut = before_ut - msg_ut;
+
+ usec_t total_ut = before_ut - after_ut;
+ double progress = (double)elapsed_ut / (double)total_ut;
+
+ return progress;
+}
+
+static inline usec_t sampling_running_file_query_remaining_time(
+ LOGS_QUERY_STATUS *lqs, struct journal_file *jf,
+ FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut,
+ usec_t *total_time_ut, usec_t *remaining_start_ut,
+ usec_t *remaining_end_ut) {
+ usec_t after_ut, before_ut;
+ sampling_running_file_query_overlapping_timeframe_ut(lqs, jf, direction, msg_ut, &after_ut, &before_ut);
+
+ // since we have a timestamp in msg_ut
+ // this timestamp can extend the overlap
+ if(msg_ut <= after_ut)
+ after_ut = msg_ut - 1;
+
+ if(msg_ut >= before_ut)
+ before_ut = msg_ut + 1;
+
+ // return the remaining duration
+ usec_t remaining_from_ut, remaining_to_ut;
+ if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) {
+ remaining_from_ut = msg_ut;
+ remaining_to_ut = before_ut;
+ }
+ else {
+ remaining_from_ut = after_ut;
+ remaining_to_ut = msg_ut;
+ }
+
+ usec_t remaining_ut = remaining_to_ut - remaining_from_ut;
+
+ if(total_time_ut)
+ *total_time_ut = (before_ut > after_ut) ? before_ut - after_ut : 1;
+
+ if(remaining_start_ut)
+ *remaining_start_ut = remaining_from_ut;
+
+ if(remaining_end_ut)
+ *remaining_end_ut = remaining_to_ut;
+
+ return remaining_ut;
+}
+
+static inline size_t sampling_running_file_query_estimate_remaining_lines_by_time(
+ LOGS_QUERY_STATUS *lqs,
+ struct journal_file *jf,
+ FACETS_ANCHOR_DIRECTION direction,
+ usec_t msg_ut) {
+ size_t scanned_lines = sampling_file_lines_scanned_so_far(lqs);
+
+ // Calculate the proportion of time covered
+ usec_t total_time_ut, remaining_start_ut, remaining_end_ut;
+ usec_t remaining_time_ut = sampling_running_file_query_remaining_time(
+ lqs, jf, direction, msg_ut, &total_time_ut, &remaining_start_ut, &remaining_end_ut);
+ if (total_time_ut == 0) total_time_ut = 1;
+
+ double proportion_by_time = (double) (total_time_ut - remaining_time_ut) / (double) total_time_ut;
+
+ if (proportion_by_time == 0 || proportion_by_time > 1.0 || !isfinite(proportion_by_time))
+ proportion_by_time = 1.0;
+
+ // Estimate the total number of lines in the file
+ size_t expected_matching_logs_by_time = (size_t)((double)scanned_lines / proportion_by_time);
+
+ if(jf->messages_in_file && expected_matching_logs_by_time > jf->messages_in_file)
+ expected_matching_logs_by_time = jf->messages_in_file;
+
+ // Calculate the estimated number of remaining lines
+ size_t remaining_logs_by_time = expected_matching_logs_by_time - scanned_lines;
+ if (remaining_logs_by_time < 1) remaining_logs_by_time = 1;
+
+ // nd_log(NDLS_COLLECTORS, NDLP_INFO,
+ // "JOURNAL ESTIMATION: '%s' "
+ // "scanned_lines=%zu [sampled=%zu, unsampled=%zu, estimated=%zu], "
+ // "file [%"PRIu64" - %"PRIu64", duration %"PRId64", known lines in file %zu], "
+ // "query [%"PRIu64" - %"PRIu64", duration %"PRId64"], "
+ // "first message read from the file at %"PRIu64", current message at %"PRIu64", "
+ // "proportion of time %.2f %%, "
+ // "expected total lines in file %zu, "
+ // "remaining lines %zu, "
+ // "remaining time %"PRIu64" [%"PRIu64" - %"PRIu64", duration %"PRId64"]"
+ // , jf->filename
+ // , scanned_lines, fqs->samples_per_file.sampled, fqs->samples_per_file.unsampled, fqs->samples_per_file.estimated
+ // , jf->msg_first_ut, jf->msg_last_ut, jf->msg_last_ut - jf->msg_first_ut, jf->messages_in_file
+ // , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut
+ // , fqs->query_file.first_msg_ut, msg_ut
+ // , proportion_by_time * 100.0
+ // , expected_matching_logs_by_time
+ // , remaining_logs_by_time
+ // , remaining_time_ut, remaining_start_ut, remaining_end_ut, remaining_end_ut - remaining_start_ut
+ // );
+
+ return remaining_logs_by_time;
+}
+
+static inline size_t sampling_running_file_query_estimate_remaining_lines(
+ sd_journal *j __maybe_unused, LOGS_QUERY_STATUS *lqs, struct journal_file *jf,
+ FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
+ size_t remaining_logs_by_seqnum = 0;
+
+#ifdef HAVE_SD_JOURNAL_GET_SEQNUM
+ size_t expected_matching_logs_by_seqnum = 0;
+ double proportion_by_seqnum = 0.0;
+ uint64_t current_msg_seqnum;
+ sd_id128_t current_msg_writer;
+ if(!lqs->c.query_file.first_msg_seqnum || sd_journal_get_seqnum(j, &current_msg_seqnum, &current_msg_writer) < 0) {
+ lqs->c.query_file.first_msg_seqnum = 0;
+ lqs->c.query_file.first_msg_writer = SD_ID128_NULL;
+ }
+ else if(jf->messages_in_file) {
+ size_t scanned_lines = sampling_file_lines_scanned_so_far(lqs);
+
+ double proportion_of_all_lines_so_far;
+ if(direction == FACETS_ANCHOR_DIRECTION_FORWARD)
+ proportion_of_all_lines_so_far = (double)scanned_lines / (double)(current_msg_seqnum - jf->first_seqnum);
+ else
+ proportion_of_all_lines_so_far = (double)scanned_lines / (double)(jf->last_seqnum - current_msg_seqnum);
+
+ if(proportion_of_all_lines_so_far > 1.0)
+ proportion_of_all_lines_so_far = 1.0;
+
+ expected_matching_logs_by_seqnum = (size_t)(proportion_of_all_lines_so_far * (double)jf->messages_in_file);
+
+ proportion_by_seqnum = (double)scanned_lines / (double)expected_matching_logs_by_seqnum;
+
+ if (proportion_by_seqnum == 0 || proportion_by_seqnum > 1.0 || !isfinite(proportion_by_seqnum))
+ proportion_by_seqnum = 1.0;
+
+ remaining_logs_by_seqnum = expected_matching_logs_by_seqnum - scanned_lines;
+ if(!remaining_logs_by_seqnum) remaining_logs_by_seqnum = 1;
+ }
+#endif
+
+ if(remaining_logs_by_seqnum)
+ return remaining_logs_by_seqnum;
+
+ return sampling_running_file_query_estimate_remaining_lines_by_time(lqs, jf, direction, msg_ut);
+}
+
+static inline void sampling_decide_file_sampling_every(sd_journal *j,
+ LOGS_QUERY_STATUS *lqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
+ size_t files_matched = lqs->c.files_matched;
+ if(!files_matched) files_matched = 1;
+
+ size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, lqs, jf, direction, msg_ut);
+ size_t wanted_samples = (lqs->rq.sampling / 2) / files_matched;
+ if(!wanted_samples) wanted_samples = 1;
+
+ lqs->c.samples_per_file.every = remaining_lines / wanted_samples;
+
+ if(lqs->c.samples_per_file.every < 1)
+ lqs->c.samples_per_file.every = 1;
+}
+
+typedef enum {
+ SAMPLING_STOP_AND_ESTIMATE = -1,
+ SAMPLING_FULL = 0,
+ SAMPLING_SKIP_FIELDS = 1,
+} sampling_t;
+
+static inline sampling_t is_row_in_sample(
+ sd_journal *j, LOGS_QUERY_STATUS *lqs, struct journal_file *jf,
+ usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction, bool candidate_to_keep) {
+ if(!lqs->rq.sampling || candidate_to_keep)
+ return SAMPLING_FULL;
+
+ if(unlikely(msg_ut < lqs->c.samples_per_time_slot.start_ut))
+ msg_ut = lqs->c.samples_per_time_slot.start_ut;
+ if(unlikely(msg_ut > lqs->c.samples_per_time_slot.end_ut))
+ msg_ut = lqs->c.samples_per_time_slot.end_ut;
+
+ size_t slot = (msg_ut - lqs->c.samples_per_time_slot.start_ut) / lqs->c.samples_per_time_slot.step_ut;
+ if(slot >= lqs->c.samples.slots)
+ slot = lqs->c.samples.slots - 1;
+
+ bool should_sample = false;
+
+ if(lqs->c.samples.sampled < lqs->c.samples.enable_after_samples ||
+ lqs->c.samples_per_file.sampled < lqs->c.samples_per_file.enable_after_samples ||
+ lqs->c.samples_per_time_slot.sampled[slot] < lqs->c.samples_per_time_slot.enable_after_samples)
+ should_sample = true;
+
+ else if(lqs->c.samples_per_file.recalibrate >= SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE || !lqs->c.samples_per_file.every) {
+ // this is the first to be unsampled for this file
+ sampling_decide_file_sampling_every(j, lqs, jf, direction, msg_ut);
+ lqs->c.samples_per_file.recalibrate = 0;
+ should_sample = true;
+ }
+ else {
+ // we sample 1 row every lqs->c.samples_per_file.every
+ if(lqs->c.samples_per_file.skipped >= lqs->c.samples_per_file.every) {
+ lqs->c.samples_per_file.skipped = 0;
+ should_sample = true;
+ }
+ else
+ lqs->c.samples_per_file.skipped++;
+ }
+
+ if(should_sample) {
+ lqs->c.samples.sampled++;
+ lqs->c.samples_per_file.sampled++;
+ lqs->c.samples_per_time_slot.sampled[slot]++;
+
+ return SAMPLING_FULL;
+ }
+
+ lqs->c.samples_per_file.recalibrate++;
+
+ lqs->c.samples.unsampled++;
+ lqs->c.samples_per_file.unsampled++;
+ lqs->c.samples_per_time_slot.unsampled[slot]++;
+
+ if(lqs->c.samples_per_file.unsampled > lqs->c.samples_per_file.sampled) {
+ double progress_by_time = sampling_running_file_query_progress_by_time(lqs, jf, direction, msg_ut);
+
+ if(progress_by_time > SYSTEMD_JOURNAL_ENABLE_ESTIMATIONS_FILE_PERCENTAGE)
+ return SAMPLING_STOP_AND_ESTIMATE;
+ }
+
+ return SAMPLING_SKIP_FIELDS;
+}
+
+static inline void sampling_update_running_query_file_estimates(
+ FACETS *facets, sd_journal *j,
+ LOGS_QUERY_STATUS *lqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction) {
+ usec_t total_time_ut, remaining_start_ut, remaining_end_ut;
+ sampling_running_file_query_remaining_time(
+ lqs, jf, direction, msg_ut, &total_time_ut, &remaining_start_ut, &remaining_end_ut);
+ size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, lqs, jf, direction, msg_ut);
+ facets_update_estimations(facets, remaining_start_ut, remaining_end_ut, remaining_lines);
+ lqs->c.samples.estimated += remaining_lines;
+ lqs->c.samples_per_file.estimated += remaining_lines;
+}
+
+#endif //NETDATA_SYSTEMD_JOURNAL_SAMPLING_H
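
The sampling header above is easier to reason about in isolation. The following sketch is not part of the patch and uses only made-up numbers and plain C types; it illustrates the core idea behind `is_row_in_sample()`: rows are bucketed into histogram time slots, the first rows of every slot are always kept, and later rows are kept only once every `every` rows. The real code additionally recalibrates `every` from estimates of the remaining lines per journal file and can stop and estimate the rest entirely.

```c
/*
 * Self-contained sketch (not part of the patch) of the time-slot bucketing and
 * "keep 1 in every N" thinning used by the journal sampling code. All numbers
 * and names here are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define SLOTS 4

int main(void) {
    uint64_t start_ut = 0, end_ut = 4000000;           /* query timeframe, microseconds */
    uint64_t step_ut = (end_ut - start_ut) / SLOTS;    /* duration of one histogram slot */

    unsigned sampled[SLOTS] = {0}, unsampled[SLOTS] = {0};
    unsigned enable_after = 3;        /* always keep the first rows of each slot */
    unsigned every = 5, skipped = 0;  /* afterwards keep 1 row for every 5 skipped */

    for(uint64_t msg_ut = start_ut; msg_ut < end_ut; msg_ut += 50000) {
        size_t slot = (size_t)((msg_ut - start_ut) / step_ut);
        if(slot >= SLOTS) slot = SLOTS - 1;

        int keep;
        if(sampled[slot] < enable_after)
            keep = 1;                 /* still warming up this slot */
        else if(skipped >= every) {
            skipped = 0;              /* periodic full sample */
            keep = 1;
        }
        else {
            skipped++;                /* row counted, but its fields are skipped */
            keep = 0;
        }

        if(keep) sampled[slot]++;
        else     unsampled[slot]++;
    }

    for(size_t s = 0; s < SLOTS; s++)
        printf("slot %zu: sampled=%u unsampled=%u\n", s, sampled[s], unsampled[s]);

    return 0;
}
```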
diff --git a/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c b/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c
index 6f12f154e..dd48ccc35 100644
--- a/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c
+++ b/src/collectors/systemd-journal.plugin/systemd-journal-watcher.c
@@ -245,7 +245,7 @@ void process_event(Watcher *watcher, int inotifyFd, struct inotify_event *event)
"JOURNAL WATCHER: Received unhandled event with mask %u for directory '%s'",
event->mask, fullPath);
}
- else if(len > sizeof(".journal") - 1 && strcmp(&event->name[len - (sizeof(".journal") - 1)], ".journal") == 0) {
+ else if(is_journal_file(event->name, (ssize_t)len, NULL)) {
// It is a file that ends in .journal (or .journal~)
// add it to our pending list
dictionary_set(watcher->pending, fullPath, NULL, 0);
diff --git a/src/collectors/systemd-journal.plugin/systemd-journal.c b/src/collectors/systemd-journal.plugin/systemd-journal.c
index 6da9c687e..9666e0109 100644
--- a/src/collectors/systemd-journal.plugin/systemd-journal.c
+++ b/src/collectors/systemd-journal.plugin/systemd-journal.c
@@ -5,53 +5,99 @@
* GPL v3+
*/
-#include "systemd-internals.h"
-
/*
* TODO
- *
 * _UDEV_DEVLINK is frequently set more than once per field - support multi-value facets
*
*/
-#define FACET_MAX_VALUE_LENGTH 8192
+#include "systemd-internals.h"
#define SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION "View, search and analyze systemd journal entries."
#define SYSTEMD_JOURNAL_FUNCTION_NAME "systemd-journal"
-#define SYSTEMD_JOURNAL_DEFAULT_TIMEOUT 60
-#define SYSTEMD_JOURNAL_MAX_PARAMS 1000
-#define SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION (1 * 3600)
-#define SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY 200
-#define SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING 1000000
-#define SYSTEMD_JOURNAL_SAMPLING_SLOTS 1000
-#define SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE 10000
+#define SYSTEMD_JOURNAL_SAMPLING_SLOTS 1000
+#define SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE 10000
-#define SYSTEMD_JOURNAL_PROGRESS_EVERY_UT (250 * USEC_PER_MS)
+#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
+#define LQS_DEFAULT_SLICE_MODE 1
+#else
+#define LQS_DEFAULT_SLICE_MODE 0
+#endif
+
+// functions needed by LQS
+static SD_JOURNAL_FILE_SOURCE_TYPE get_internal_source_type(const char *value);
+
+// structures needed by LQS
+struct lqs_extension {
+ struct {
+ usec_t start_ut;
+ usec_t stop_ut;
+ usec_t first_msg_ut;
+
+ sd_id128_t first_msg_writer;
+ uint64_t first_msg_seqnum;
+ } query_file;
+
+ struct {
+ uint32_t enable_after_samples;
+ uint32_t slots;
+ uint32_t sampled;
+ uint32_t unsampled;
+ uint32_t estimated;
+ } samples;
+
+ struct {
+ uint32_t enable_after_samples;
+ uint32_t every;
+ uint32_t skipped;
+ uint32_t recalibrate;
+ uint32_t sampled;
+ uint32_t unsampled;
+ uint32_t estimated;
+ } samples_per_file;
+
+ struct {
+ usec_t start_ut;
+ usec_t end_ut;
+ usec_t step_ut;
+ uint32_t enable_after_samples;
+ uint32_t sampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS];
+ uint32_t unsampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS];
+ } samples_per_time_slot;
+
+ // per file progress info
+ // size_t cached_count;
-#define JOURNAL_PARAMETER_HELP "help"
-#define JOURNAL_PARAMETER_AFTER "after"
-#define JOURNAL_PARAMETER_BEFORE "before"
-#define JOURNAL_PARAMETER_ANCHOR "anchor"
-#define JOURNAL_PARAMETER_LAST "last"
-#define JOURNAL_PARAMETER_QUERY "query"
-#define JOURNAL_PARAMETER_FACETS "facets"
-#define JOURNAL_PARAMETER_HISTOGRAM "histogram"
-#define JOURNAL_PARAMETER_DIRECTION "direction"
-#define JOURNAL_PARAMETER_IF_MODIFIED_SINCE "if_modified_since"
-#define JOURNAL_PARAMETER_DATA_ONLY "data_only"
-#define JOURNAL_PARAMETER_SOURCE "source"
-#define JOURNAL_PARAMETER_INFO "info"
-#define JOURNAL_PARAMETER_SLICE "slice"
-#define JOURNAL_PARAMETER_DELTA "delta"
-#define JOURNAL_PARAMETER_TAIL "tail"
-#define JOURNAL_PARAMETER_SAMPLING "sampling"
+ // progress statistics
+ usec_t matches_setup_ut;
+ size_t rows_useful;
+ size_t rows_read;
+ size_t bytes_read;
+ size_t files_matched;
+ size_t file_working;
+};
+
+// prepare LQS
+#define LQS_FUNCTION_NAME SYSTEMD_JOURNAL_FUNCTION_NAME
+#define LQS_FUNCTION_DESCRIPTION SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION
+#define LQS_DEFAULT_ITEMS_PER_QUERY 200
+#define LQS_DEFAULT_ITEMS_SAMPLING 1000000
+#define LQS_SOURCE_TYPE SD_JOURNAL_FILE_SOURCE_TYPE
+#define LQS_SOURCE_TYPE_ALL SDJF_ALL
+#define LQS_SOURCE_TYPE_NONE SDJF_NONE
+#define LQS_PARAMETER_SOURCE_NAME "Journal Sources" // this is how it is shown to users
+#define LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE(value) get_internal_source_type(value)
+#define LQS_FUNCTION_SOURCE_TO_JSON_ARRAY(wb) available_journal_file_sources_to_json_array(wb)
+#include "libnetdata/facets/logs_query_status.h"
+
+#include "systemd-journal-sampling.h"
+#define FACET_MAX_VALUE_LENGTH 8192
+#define SYSTEMD_JOURNAL_DEFAULT_TIMEOUT 60
+#define SYSTEMD_JOURNAL_PROGRESS_EVERY_UT (250 * USEC_PER_MS)
#define JOURNAL_KEY_ND_JOURNAL_FILE "ND_JOURNAL_FILE"
#define JOURNAL_KEY_ND_JOURNAL_PROCESS "ND_JOURNAL_PROCESS"
-
-#define JOURNAL_DEFAULT_SLICE_MODE true
#define JOURNAL_DEFAULT_DIRECTION FACETS_ANCHOR_DIRECTION_BACKWARD
-
#define SYSTEMD_ALWAYS_VISIBLE_KEYS NULL
#define SYSTEMD_KEYS_EXCLUDED_FROM_FACETS \
@@ -182,100 +228,27 @@
// ----------------------------------------------------------------------------
-typedef struct function_query_status {
- bool *cancelled; // a pointer to the cancelling boolean
- usec_t *stop_monotonic_ut;
-
- // request
- const char *transaction;
-
- SD_JOURNAL_FILE_SOURCE_TYPE source_type;
- SIMPLE_PATTERN *sources;
- usec_t after_ut;
- usec_t before_ut;
-
- struct {
- usec_t start_ut;
- usec_t stop_ut;
- } anchor;
-
- FACETS_ANCHOR_DIRECTION direction;
- size_t entries;
- usec_t if_modified_since;
- bool delta;
- bool tail;
- bool data_only;
- bool slice;
- size_t sampling;
- size_t filters;
- usec_t last_modified;
- const char *query;
- const char *histogram;
-
- struct {
- usec_t start_ut; // the starting time of the query - we start from this
- usec_t stop_ut; // the ending time of the query - we stop at this
- usec_t first_msg_ut;
-
- sd_id128_t first_msg_writer;
- uint64_t first_msg_seqnum;
- } query_file;
-
- struct {
- uint32_t enable_after_samples;
- uint32_t slots;
- uint32_t sampled;
- uint32_t unsampled;
- uint32_t estimated;
- } samples;
-
- struct {
- uint32_t enable_after_samples;
- uint32_t every;
- uint32_t skipped;
- uint32_t recalibrate;
- uint32_t sampled;
- uint32_t unsampled;
- uint32_t estimated;
- } samples_per_file;
-
- struct {
- usec_t start_ut;
- usec_t end_ut;
- usec_t step_ut;
- uint32_t enable_after_samples;
- uint32_t sampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS];
- uint32_t unsampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS];
- } samples_per_time_slot;
-
- // per file progress info
- // size_t cached_count;
-
- // progress statistics
- usec_t matches_setup_ut;
- size_t rows_useful;
- size_t rows_read;
- size_t bytes_read;
- size_t files_matched;
- size_t file_working;
-} FUNCTION_QUERY_STATUS;
-
-static void log_fqs(FUNCTION_QUERY_STATUS *fqs, const char *msg) {
- netdata_log_error("ERROR: %s, on query "
- "timeframe [%"PRIu64" - %"PRIu64"], "
- "anchor [%"PRIu64" - %"PRIu64"], "
- "if_modified_since %"PRIu64", "
- "data_only:%s, delta:%s, tail:%s, direction:%s"
- , msg
- , fqs->after_ut, fqs->before_ut
- , fqs->anchor.start_ut, fqs->anchor.stop_ut
- , fqs->if_modified_since
- , fqs->data_only ? "true" : "false"
- , fqs->delta ? "true" : "false"
- , fqs->tail ? "tail" : "false"
- , fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD ? "forward" : "backward");
+static SD_JOURNAL_FILE_SOURCE_TYPE get_internal_source_type(const char *value) {
+ if(strcmp(value, SDJF_SOURCE_ALL_NAME) == 0)
+ return SDJF_ALL;
+ else if(strcmp(value, SDJF_SOURCE_LOCAL_NAME) == 0)
+ return SDJF_LOCAL_ALL;
+ else if(strcmp(value, SDJF_SOURCE_REMOTES_NAME) == 0)
+ return SDJF_REMOTE_ALL;
+ else if(strcmp(value, SDJF_SOURCE_NAMESPACES_NAME) == 0)
+ return SDJF_LOCAL_NAMESPACE;
+ else if(strcmp(value, SDJF_SOURCE_LOCAL_SYSTEM_NAME) == 0)
+ return SDJF_LOCAL_SYSTEM;
+ else if(strcmp(value, SDJF_SOURCE_LOCAL_USERS_NAME) == 0)
+ return SDJF_LOCAL_USER;
+ else if(strcmp(value, SDJF_SOURCE_LOCAL_OTHER_NAME) == 0)
+ return SDJF_LOCAL_OTHER;
+
+ return SDJF_NONE;
}
+// ----------------------------------------------------------------------------
+
static inline bool netdata_systemd_journal_seek_to(sd_journal *j, usec_t timestamp) {
if(sd_journal_seek_realtime_usec(j, timestamp) < 0) {
netdata_log_error("SYSTEMD-JOURNAL: Failed to seek to %" PRIu64, timestamp);
@@ -291,367 +264,6 @@ static inline bool netdata_systemd_journal_seek_to(sd_journal *j, usec_t timesta
#define JD_SOURCE_REALTIME_TIMESTAMP "_SOURCE_REALTIME_TIMESTAMP"
// ----------------------------------------------------------------------------
-// sampling support
-
-static void sampling_query_init(FUNCTION_QUERY_STATUS *fqs, FACETS *facets) {
- if(!fqs->sampling)
- return;
-
- if(!fqs->slice) {
- // the user is doing a full data query
- // disable sampling
- fqs->sampling = 0;
- return;
- }
-
- if(fqs->data_only) {
- // the user is doing a data query
- // disable sampling
- fqs->sampling = 0;
- return;
- }
-
- if(!fqs->files_matched) {
- // no files have been matched
- // disable sampling
- fqs->sampling = 0;
- return;
- }
-
- fqs->samples.slots = facets_histogram_slots(facets);
- if(fqs->samples.slots < 2) fqs->samples.slots = 2;
- if(fqs->samples.slots > SYSTEMD_JOURNAL_SAMPLING_SLOTS)
- fqs->samples.slots = SYSTEMD_JOURNAL_SAMPLING_SLOTS;
-
- if(!fqs->after_ut || !fqs->before_ut || fqs->after_ut >= fqs->before_ut) {
- // we don't have enough information for sampling
- fqs->sampling = 0;
- return;
- }
-
- usec_t delta = fqs->before_ut - fqs->after_ut;
- usec_t step = delta / facets_histogram_slots(facets) - 1;
- if(step < 1) step = 1;
-
- fqs->samples_per_time_slot.start_ut = fqs->after_ut;
- fqs->samples_per_time_slot.end_ut = fqs->before_ut;
- fqs->samples_per_time_slot.step_ut = step;
-
- // the minimum number of rows to enable sampling
- fqs->samples.enable_after_samples = fqs->sampling / 2;
-
- size_t files_matched = fqs->files_matched;
- if(!files_matched)
- files_matched = 1;
-
- // the minimum number of rows per file to enable sampling
- fqs->samples_per_file.enable_after_samples = (fqs->sampling / 4) / files_matched;
- if(fqs->samples_per_file.enable_after_samples < fqs->entries)
- fqs->samples_per_file.enable_after_samples = fqs->entries;
-
- // the minimum number of rows per time slot to enable sampling
- fqs->samples_per_time_slot.enable_after_samples = (fqs->sampling / 4) / fqs->samples.slots;
- if(fqs->samples_per_time_slot.enable_after_samples < fqs->entries)
- fqs->samples_per_time_slot.enable_after_samples = fqs->entries;
-}
-
-static void sampling_file_init(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf __maybe_unused) {
- fqs->samples_per_file.sampled = 0;
- fqs->samples_per_file.unsampled = 0;
- fqs->samples_per_file.estimated = 0;
- fqs->samples_per_file.every = 0;
- fqs->samples_per_file.skipped = 0;
- fqs->samples_per_file.recalibrate = 0;
-}
-
-static size_t sampling_file_lines_scanned_so_far(FUNCTION_QUERY_STATUS *fqs) {
- size_t sampled = fqs->samples_per_file.sampled + fqs->samples_per_file.unsampled;
- if(!sampled) sampled = 1;
- return sampled;
-}
-
-static void sampling_running_file_query_overlapping_timeframe_ut(
- FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction,
- usec_t msg_ut, usec_t *after_ut, usec_t *before_ut) {
-
- // find the overlap of the query and file timeframes
- // taking into account the first message we encountered
-
- usec_t oldest_ut, newest_ut;
- if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) {
- // the first message we know (oldest)
- oldest_ut = fqs->query_file.first_msg_ut ? fqs->query_file.first_msg_ut : jf->msg_first_ut;
- if(!oldest_ut) oldest_ut = fqs->query_file.start_ut;
-
- if(jf->msg_last_ut)
- newest_ut = MIN(fqs->query_file.stop_ut, jf->msg_last_ut);
- else if(jf->file_last_modified_ut)
- newest_ut = MIN(fqs->query_file.stop_ut, jf->file_last_modified_ut);
- else
- newest_ut = fqs->query_file.stop_ut;
-
- if(msg_ut < oldest_ut)
- oldest_ut = msg_ut - 1;
- }
- else /* BACKWARD */ {
- // the latest message we know (newest)
- newest_ut = fqs->query_file.first_msg_ut ? fqs->query_file.first_msg_ut : jf->msg_last_ut;
- if(!newest_ut) newest_ut = fqs->query_file.start_ut;
-
- if(jf->msg_first_ut)
- oldest_ut = MAX(fqs->query_file.stop_ut, jf->msg_first_ut);
- else
- oldest_ut = fqs->query_file.stop_ut;
-
- if(newest_ut < msg_ut)
- newest_ut = msg_ut + 1;
- }
-
- *after_ut = oldest_ut;
- *before_ut = newest_ut;
-}
-
-static double sampling_running_file_query_progress_by_time(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf,
- FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
-
- usec_t after_ut, before_ut, elapsed_ut;
- sampling_running_file_query_overlapping_timeframe_ut(fqs, jf, direction, msg_ut, &after_ut, &before_ut);
-
- if(direction == FACETS_ANCHOR_DIRECTION_FORWARD)
- elapsed_ut = msg_ut - after_ut;
- else
- elapsed_ut = before_ut - msg_ut;
-
- usec_t total_ut = before_ut - after_ut;
- double progress = (double)elapsed_ut / (double)total_ut;
-
- return progress;
-}
-
-static usec_t sampling_running_file_query_remaining_time(FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf,
- FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut,
- usec_t *total_time_ut, usec_t *remaining_start_ut,
- usec_t *remaining_end_ut) {
- usec_t after_ut, before_ut;
- sampling_running_file_query_overlapping_timeframe_ut(fqs, jf, direction, msg_ut, &after_ut, &before_ut);
-
- // since we have a timestamp in msg_ut
- // this timestamp can extend the overlap
- if(msg_ut <= after_ut)
- after_ut = msg_ut - 1;
-
- if(msg_ut >= before_ut)
- before_ut = msg_ut + 1;
-
- // return the remaining duration
- usec_t remaining_from_ut, remaining_to_ut;
- if(direction == FACETS_ANCHOR_DIRECTION_FORWARD) {
- remaining_from_ut = msg_ut;
- remaining_to_ut = before_ut;
- }
- else {
- remaining_from_ut = after_ut;
- remaining_to_ut = msg_ut;
- }
-
- usec_t remaining_ut = remaining_to_ut - remaining_from_ut;
-
- if(total_time_ut)
- *total_time_ut = (before_ut > after_ut) ? before_ut - after_ut : 1;
-
- if(remaining_start_ut)
- *remaining_start_ut = remaining_from_ut;
-
- if(remaining_end_ut)
- *remaining_end_ut = remaining_to_ut;
-
- return remaining_ut;
-}
-
-static size_t sampling_running_file_query_estimate_remaining_lines_by_time(FUNCTION_QUERY_STATUS *fqs,
- struct journal_file *jf,
- FACETS_ANCHOR_DIRECTION direction,
- usec_t msg_ut) {
- size_t scanned_lines = sampling_file_lines_scanned_so_far(fqs);
-
- // Calculate the proportion of time covered
- usec_t total_time_ut, remaining_start_ut, remaining_end_ut;
- usec_t remaining_time_ut = sampling_running_file_query_remaining_time(fqs, jf, direction, msg_ut, &total_time_ut,
- &remaining_start_ut, &remaining_end_ut);
- if (total_time_ut == 0) total_time_ut = 1;
-
- double proportion_by_time = (double) (total_time_ut - remaining_time_ut) / (double) total_time_ut;
-
- if (proportion_by_time == 0 || proportion_by_time > 1.0 || !isfinite(proportion_by_time))
- proportion_by_time = 1.0;
-
- // Estimate the total number of lines in the file
- size_t expected_matching_logs_by_time = (size_t)((double)scanned_lines / proportion_by_time);
-
- if(jf->messages_in_file && expected_matching_logs_by_time > jf->messages_in_file)
- expected_matching_logs_by_time = jf->messages_in_file;
-
- // Calculate the estimated number of remaining lines
- size_t remaining_logs_by_time = expected_matching_logs_by_time - scanned_lines;
- if (remaining_logs_by_time < 1) remaining_logs_by_time = 1;
-
-// nd_log(NDLS_COLLECTORS, NDLP_INFO,
-// "JOURNAL ESTIMATION: '%s' "
-// "scanned_lines=%zu [sampled=%zu, unsampled=%zu, estimated=%zu], "
-// "file [%"PRIu64" - %"PRIu64", duration %"PRId64", known lines in file %zu], "
-// "query [%"PRIu64" - %"PRIu64", duration %"PRId64"], "
-// "first message read from the file at %"PRIu64", current message at %"PRIu64", "
-// "proportion of time %.2f %%, "
-// "expected total lines in file %zu, "
-// "remaining lines %zu, "
-// "remaining time %"PRIu64" [%"PRIu64" - %"PRIu64", duration %"PRId64"]"
-// , jf->filename
-// , scanned_lines, fqs->samples_per_file.sampled, fqs->samples_per_file.unsampled, fqs->samples_per_file.estimated
-// , jf->msg_first_ut, jf->msg_last_ut, jf->msg_last_ut - jf->msg_first_ut, jf->messages_in_file
-// , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut
-// , fqs->query_file.first_msg_ut, msg_ut
-// , proportion_by_time * 100.0
-// , expected_matching_logs_by_time
-// , remaining_logs_by_time
-// , remaining_time_ut, remaining_start_ut, remaining_end_ut, remaining_end_ut - remaining_start_ut
-// );
-
- return remaining_logs_by_time;
-}
-
-static size_t sampling_running_file_query_estimate_remaining_lines(sd_journal *j __maybe_unused, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
- size_t remaining_logs_by_seqnum = 0;
-
-#ifdef HAVE_SD_JOURNAL_GET_SEQNUM
- size_t expected_matching_logs_by_seqnum = 0;
- double proportion_by_seqnum = 0.0;
- uint64_t current_msg_seqnum;
- sd_id128_t current_msg_writer;
- if(!fqs->query_file.first_msg_seqnum || sd_journal_get_seqnum(j, &current_msg_seqnum, &current_msg_writer) < 0) {
- fqs->query_file.first_msg_seqnum = 0;
- fqs->query_file.first_msg_writer = SD_ID128_NULL;
- }
- else if(jf->messages_in_file) {
- size_t scanned_lines = sampling_file_lines_scanned_so_far(fqs);
-
- double proportion_of_all_lines_so_far;
- if(direction == FACETS_ANCHOR_DIRECTION_FORWARD)
- proportion_of_all_lines_so_far = (double)scanned_lines / (double)(current_msg_seqnum - jf->first_seqnum);
- else
- proportion_of_all_lines_so_far = (double)scanned_lines / (double)(jf->last_seqnum - current_msg_seqnum);
-
- if(proportion_of_all_lines_so_far > 1.0)
- proportion_of_all_lines_so_far = 1.0;
-
- expected_matching_logs_by_seqnum = (size_t)(proportion_of_all_lines_so_far * (double)jf->messages_in_file);
-
- proportion_by_seqnum = (double)scanned_lines / (double)expected_matching_logs_by_seqnum;
-
- if (proportion_by_seqnum == 0 || proportion_by_seqnum > 1.0 || !isfinite(proportion_by_seqnum))
- proportion_by_seqnum = 1.0;
-
- remaining_logs_by_seqnum = expected_matching_logs_by_seqnum - scanned_lines;
- if(!remaining_logs_by_seqnum) remaining_logs_by_seqnum = 1;
- }
-#endif
-
- if(remaining_logs_by_seqnum)
- return remaining_logs_by_seqnum;
-
- return sampling_running_file_query_estimate_remaining_lines_by_time(fqs, jf, direction, msg_ut);
-}
-
-static void sampling_decide_file_sampling_every(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, FACETS_ANCHOR_DIRECTION direction, usec_t msg_ut) {
- size_t files_matched = fqs->files_matched;
- if(!files_matched) files_matched = 1;
-
- size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, fqs, jf, direction, msg_ut);
- size_t wanted_samples = (fqs->sampling / 2) / files_matched;
- if(!wanted_samples) wanted_samples = 1;
-
- fqs->samples_per_file.every = remaining_lines / wanted_samples;
-
- if(fqs->samples_per_file.every < 1)
- fqs->samples_per_file.every = 1;
-}
-
-typedef enum {
- SAMPLING_STOP_AND_ESTIMATE = -1,
- SAMPLING_FULL = 0,
- SAMPLING_SKIP_FIELDS = 1,
-} sampling_t;
-
-static inline sampling_t is_row_in_sample(sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction, bool candidate_to_keep) {
- if(!fqs->sampling || candidate_to_keep)
- return SAMPLING_FULL;
-
- if(unlikely(msg_ut < fqs->samples_per_time_slot.start_ut))
- msg_ut = fqs->samples_per_time_slot.start_ut;
- if(unlikely(msg_ut > fqs->samples_per_time_slot.end_ut))
- msg_ut = fqs->samples_per_time_slot.end_ut;
-
- size_t slot = (msg_ut - fqs->samples_per_time_slot.start_ut) / fqs->samples_per_time_slot.step_ut;
- if(slot >= fqs->samples.slots)
- slot = fqs->samples.slots - 1;
-
- bool should_sample = false;
-
- if(fqs->samples.sampled < fqs->samples.enable_after_samples ||
- fqs->samples_per_file.sampled < fqs->samples_per_file.enable_after_samples ||
- fqs->samples_per_time_slot.sampled[slot] < fqs->samples_per_time_slot.enable_after_samples)
- should_sample = true;
-
- else if(fqs->samples_per_file.recalibrate >= SYSTEMD_JOURNAL_SAMPLING_RECALIBRATE || !fqs->samples_per_file.every) {
- // this is the first to be unsampled for this file
- sampling_decide_file_sampling_every(j, fqs, jf, direction, msg_ut);
- fqs->samples_per_file.recalibrate = 0;
- should_sample = true;
- }
- else {
- // we sample 1 every fqs->samples_per_file.every
- if(fqs->samples_per_file.skipped >= fqs->samples_per_file.every) {
- fqs->samples_per_file.skipped = 0;
- should_sample = true;
- }
- else
- fqs->samples_per_file.skipped++;
- }
-
- if(should_sample) {
- fqs->samples.sampled++;
- fqs->samples_per_file.sampled++;
- fqs->samples_per_time_slot.sampled[slot]++;
-
- return SAMPLING_FULL;
- }
-
- fqs->samples_per_file.recalibrate++;
-
- fqs->samples.unsampled++;
- fqs->samples_per_file.unsampled++;
- fqs->samples_per_time_slot.unsampled[slot]++;
-
- if(fqs->samples_per_file.unsampled > fqs->samples_per_file.sampled) {
- double progress_by_time = sampling_running_file_query_progress_by_time(fqs, jf, direction, msg_ut);
-
- if(progress_by_time > SYSTEMD_JOURNAL_ENABLE_ESTIMATIONS_FILE_PERCENTAGE)
- return SAMPLING_STOP_AND_ESTIMATE;
- }
-
- return SAMPLING_SKIP_FIELDS;
-}
-
-static void sampling_update_running_query_file_estimates(FACETS *facets, sd_journal *j, FUNCTION_QUERY_STATUS *fqs, struct journal_file *jf, usec_t msg_ut, FACETS_ANCHOR_DIRECTION direction) {
- usec_t total_time_ut, remaining_start_ut, remaining_end_ut;
- sampling_running_file_query_remaining_time(fqs, jf, direction, msg_ut, &total_time_ut, &remaining_start_ut,
- &remaining_end_ut);
- size_t remaining_lines = sampling_running_file_query_estimate_remaining_lines(j, fqs, jf, direction, msg_ut);
- facets_update_estimations(facets, remaining_start_ut, remaining_end_ut, remaining_lines);
- fqs->samples.estimated += remaining_lines;
- fqs->samples_per_file.estimated += remaining_lines;
-}
-
-// ----------------------------------------------------------------------------
static inline size_t netdata_systemd_journal_process_row(sd_journal *j, FACETS *facets, struct journal_file *jf, usec_t *msg_ut) {
const void *data;
@@ -721,16 +333,17 @@ static inline ND_SD_JOURNAL_STATUS check_stop(const bool *cancelled, const usec_
ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward(
sd_journal *j, BUFFER *wb __maybe_unused, FACETS *facets,
- struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) {
+ struct journal_file *jf,
+ LOGS_QUERY_STATUS *fqs) {
usec_t anchor_delta = __atomic_load_n(&jf->max_journal_vs_realtime_delta_ut, __ATOMIC_RELAXED);
+ lqs_query_timeframe(fqs, anchor_delta);
+ usec_t start_ut = fqs->query.start_ut;
+ usec_t stop_ut = fqs->query.stop_ut;
+ bool stop_when_full = fqs->query.stop_when_full;
- usec_t start_ut = ((fqs->data_only && fqs->anchor.start_ut) ? fqs->anchor.start_ut : fqs->before_ut) + anchor_delta;
- usec_t stop_ut = (fqs->data_only && fqs->anchor.stop_ut) ? fqs->anchor.stop_ut : fqs->after_ut;
- bool stop_when_full = (fqs->data_only && !fqs->anchor.stop_ut);
-
- fqs->query_file.start_ut = start_ut;
- fqs->query_file.stop_ut = stop_ut;
+ fqs->c.query_file.start_ut = start_ut;
+ fqs->c.query_file.stop_ut = stop_ut;
if(!netdata_systemd_journal_seek_to(j, start_ut))
return ND_SD_JOURNAL_FAILED_TO_SEEK;
@@ -765,12 +378,12 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward(
if(unlikely(!first_msg_ut)) {
first_msg_ut = msg_ut;
- fqs->query_file.first_msg_ut = msg_ut;
+ fqs->c.query_file.first_msg_ut = msg_ut;
#ifdef HAVE_SD_JOURNAL_GET_SEQNUM
- if(sd_journal_get_seqnum(j, &fqs->query_file.first_msg_seqnum, &fqs->query_file.first_msg_writer) < 0) {
- fqs->query_file.first_msg_seqnum = 0;
- fqs->query_file.first_msg_writer = SD_ID128_NULL;
+ if(sd_journal_get_seqnum(j, &fqs->c.query_file.first_msg_seqnum, &fqs->c.query_file.first_msg_writer) < 0) {
+ fqs->c.query_file.first_msg_seqnum = 0;
+ fqs->c.query_file.first_msg_writer = SD_ID128_NULL;
}
#endif
}
@@ -794,7 +407,7 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward(
row_counter++;
if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 &&
stop_when_full &&
- facets_rows(facets) >= fqs->entries)) {
+ facets_rows(facets) >= fqs->rq.entries)) {
// stop the data only query
usec_t oldest = facets_row_oldest_ut(facets);
if(oldest && msg_ut < (oldest - anchor_delta))
@@ -802,10 +415,10 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward(
}
if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) {
- FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter);
+ FUNCTION_PROGRESS_UPDATE_ROWS(fqs->c.rows_read, row_counter - last_row_counter);
last_row_counter = row_counter;
- FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes);
+ FUNCTION_PROGRESS_UPDATE_BYTES(fqs->c.bytes_read, bytes - last_bytes);
last_bytes = bytes;
status = check_stop(fqs->cancelled, fqs->stop_monotonic_ut);
@@ -819,10 +432,10 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward(
}
}
- FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter);
- FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes);
+ FUNCTION_PROGRESS_UPDATE_ROWS(fqs->c.rows_read, row_counter - last_row_counter);
+ FUNCTION_PROGRESS_UPDATE_BYTES(fqs->c.bytes_read, bytes - last_bytes);
- fqs->rows_useful += rows_useful;
+ fqs->c.rows_useful += rows_useful;
if(errors_no_timestamp)
netdata_log_error("SYSTEMD-JOURNAL: %zu lines did not have timestamps", errors_no_timestamp);
@@ -835,16 +448,17 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_backward(
ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward(
sd_journal *j, BUFFER *wb __maybe_unused, FACETS *facets,
- struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) {
+ struct journal_file *jf,
+ LOGS_QUERY_STATUS *fqs) {
usec_t anchor_delta = __atomic_load_n(&jf->max_journal_vs_realtime_delta_ut, __ATOMIC_RELAXED);
+ lqs_query_timeframe(fqs, anchor_delta);
+ usec_t start_ut = fqs->query.start_ut;
+ usec_t stop_ut = fqs->query.stop_ut;
+ bool stop_when_full = fqs->query.stop_when_full;
- usec_t start_ut = (fqs->data_only && fqs->anchor.start_ut) ? fqs->anchor.start_ut : fqs->after_ut;
- usec_t stop_ut = ((fqs->data_only && fqs->anchor.stop_ut) ? fqs->anchor.stop_ut : fqs->before_ut) + anchor_delta;
- bool stop_when_full = (fqs->data_only && !fqs->anchor.stop_ut);
-
- fqs->query_file.start_ut = start_ut;
- fqs->query_file.stop_ut = stop_ut;
+ fqs->c.query_file.start_ut = start_ut;
+ fqs->c.query_file.stop_ut = stop_ut;
if(!netdata_systemd_journal_seek_to(j, start_ut))
return ND_SD_JOURNAL_FAILED_TO_SEEK;
@@ -879,7 +493,7 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward(
if(unlikely(!first_msg_ut)) {
first_msg_ut = msg_ut;
- fqs->query_file.first_msg_ut = msg_ut;
+ fqs->c.query_file.first_msg_ut = msg_ut;
}
sampling_t sample = is_row_in_sample(j, fqs, jf, msg_ut,
@@ -901,7 +515,7 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward(
row_counter++;
if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 &&
stop_when_full &&
- facets_rows(facets) >= fqs->entries)) {
+ facets_rows(facets) >= fqs->rq.entries)) {
// stop the data only query
usec_t newest = facets_row_newest_ut(facets);
if(newest && msg_ut > (newest + anchor_delta))
@@ -909,10 +523,10 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward(
}
if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) {
- FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter);
+ FUNCTION_PROGRESS_UPDATE_ROWS(fqs->c.rows_read, row_counter - last_row_counter);
last_row_counter = row_counter;
- FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes);
+ FUNCTION_PROGRESS_UPDATE_BYTES(fqs->c.bytes_read, bytes - last_bytes);
last_bytes = bytes;
status = check_stop(fqs->cancelled, fqs->stop_monotonic_ut);
@@ -926,10 +540,10 @@ ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_forward(
}
}
- FUNCTION_PROGRESS_UPDATE_ROWS(fqs->rows_read, row_counter - last_row_counter);
- FUNCTION_PROGRESS_UPDATE_BYTES(fqs->bytes_read, bytes - last_bytes);
+ FUNCTION_PROGRESS_UPDATE_ROWS(fqs->c.rows_read, row_counter - last_row_counter);
+ FUNCTION_PROGRESS_UPDATE_BYTES(fqs->c.bytes_read, bytes - last_bytes);
- fqs->rows_useful += rows_useful;
+ fqs->c.rows_useful += rows_useful;
if(errors_no_timestamp)
netdata_log_error("SYSTEMD-JOURNAL: %zu lines did not have timestamps", errors_no_timestamp);
@@ -963,7 +577,7 @@ bool netdata_systemd_journal_check_if_modified_since(sd_journal *j, usec_t seek_
}
#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
-static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets, FUNCTION_QUERY_STATUS *fqs) {
+static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets, LOGS_QUERY_STATUS *lqs) {
const char *field = NULL;
const void *data = NULL;
size_t data_length;
@@ -974,7 +588,7 @@ static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets,
SD_JOURNAL_FOREACH_FIELD(j, field) { // for each key
bool interesting;
- if(fqs->data_only)
+ if(lqs->rq.data_only)
interesting = facets_key_name_is_filter(facets, field);
else
interesting = facets_key_name_is_facet(facets, field);
@@ -1023,7 +637,7 @@ static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets,
}
if(failures) {
- log_fqs(fqs, "failed to setup journal filter, will run the full query.");
+ lqs_log_error(lqs, "failed to setup journal filter, will run the full query.");
sd_journal_flush_matches(j);
return true;
}
@@ -1034,7 +648,8 @@ static bool netdata_systemd_filtering_by_journal(sd_journal *j, FACETS *facets,
static ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_one_file(
const char *filename, BUFFER *wb, FACETS *facets,
- struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) {
+ struct journal_file *jf,
+ LOGS_QUERY_STATUS *fqs) {
sd_journal *j = NULL;
errno_clear();
@@ -1056,18 +671,18 @@ static ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_one_file(
bool matches_filters = true;
#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
- if(fqs->slice) {
+ if(fqs->rq.slice) {
usec_t started = now_monotonic_usec();
- matches_filters = netdata_systemd_filtering_by_journal(j, facets, fqs) || !fqs->filters;
+ matches_filters = netdata_systemd_filtering_by_journal(j, facets, fqs) || !fqs->rq.filters;
usec_t ended = now_monotonic_usec();
- fqs->matches_setup_ut += (ended - started);
+ fqs->c.matches_setup_ut += (ended - started);
}
#endif // HAVE_SD_JOURNAL_RESTART_FIELDS
if(matches_filters) {
- if(fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD)
+ if(fqs->rq.direction == FACETS_ANCHOR_DIRECTION_FORWARD)
status = netdata_systemd_journal_query_forward(j, wb, facets, jf, fqs);
else
status = netdata_systemd_journal_query_backward(j, wb, facets, jf, fqs);
@@ -1081,10 +696,10 @@ static ND_SD_JOURNAL_STATUS netdata_systemd_journal_query_one_file(
return status;
}
-static bool jf_is_mine(struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) {
+static bool jf_is_mine(struct journal_file *jf, LOGS_QUERY_STATUS *fqs) {
- if((fqs->source_type == SDJF_NONE && !fqs->sources) || (jf->source_type & fqs->source_type) ||
- (fqs->sources && simple_pattern_matches(fqs->sources, string2str(jf->source)))) {
+ if((fqs->rq.source_type == SDJF_NONE && !fqs->rq.sources) || (jf->source_type & fqs->rq.source_type) ||
+ (fqs->rq.sources && simple_pattern_matches(fqs->rq.sources, string2str(jf->source)))) {
if(!jf->msg_last_ut)
// the file is not scanned yet, or the timestamps have not been updated,
@@ -1095,22 +710,24 @@ static bool jf_is_mine(struct journal_file *jf, FUNCTION_QUERY_STATUS *fqs) {
usec_t first_ut = jf->msg_first_ut - anchor_delta;
usec_t last_ut = jf->msg_last_ut + anchor_delta;
- if(last_ut >= fqs->after_ut && first_ut <= fqs->before_ut)
+ if(last_ut >= fqs->rq.after_ut && first_ut <= fqs->rq.before_ut)
return true;
}
return false;
}
-static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QUERY_STATUS *fqs) {
+static int netdata_systemd_journal_query(BUFFER *wb, LOGS_QUERY_STATUS *lqs) {
+ FACETS *facets = lqs->facets;
+
ND_SD_JOURNAL_STATUS status = ND_SD_JOURNAL_NO_FILE_MATCHED;
struct journal_file *jf;
- fqs->files_matched = 0;
- fqs->file_working = 0;
- fqs->rows_useful = 0;
- fqs->rows_read = 0;
- fqs->bytes_read = 0;
+ lqs->c.files_matched = 0;
+ lqs->c.file_working = 0;
+ lqs->c.rows_useful = 0;
+ lqs->c.rows_read = 0;
+ lqs->c.bytes_read = 0;
size_t files_used = 0;
size_t files_max = dictionary_entries(journal_files_registry);
@@ -1119,26 +736,29 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
// count the files
bool files_are_newer = false;
dfe_start_read(journal_files_registry, jf) {
- if(!jf_is_mine(jf, fqs))
+ if(!jf_is_mine(jf, lqs))
continue;
file_items[files_used++] = dictionary_acquired_item_dup(journal_files_registry, jf_dfe.item);
- if(jf->msg_last_ut > fqs->if_modified_since)
+ if(jf->msg_last_ut > lqs->rq.if_modified_since)
files_are_newer = true;
}
dfe_done(jf);
- fqs->files_matched = files_used;
+ lqs->c.files_matched = files_used;
+
+ if(lqs->rq.if_modified_since && !files_are_newer) {
+ // release the files
+ for(size_t f = 0; f < files_used ;f++)
+ dictionary_acquired_item_release(journal_files_registry, file_items[f]);
- if(fqs->if_modified_since && !files_are_newer) {
- buffer_flush(wb);
- return HTTP_RESP_NOT_MODIFIED;
+ return rrd_call_function_error(wb, "No new data since the previous call.", HTTP_RESP_NOT_MODIFIED);
}
// sort the files, so that they are optimal for facets
if(files_used >= 2) {
- if (fqs->direction == FACETS_ANCHOR_DIRECTION_BACKWARD)
+ if (lqs->rq.direction == FACETS_ANCHOR_DIRECTION_BACKWARD)
qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *),
journal_file_dict_items_backward_compar);
else
@@ -1153,38 +773,38 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
usec_t duration_ut = 0, max_duration_ut = 0;
usec_t progress_duration_ut = 0;
- sampling_query_init(fqs, facets);
+ sampling_query_init(lqs, facets);
buffer_json_member_add_array(wb, "_journal_files");
for(size_t f = 0; f < files_used ;f++) {
const char *filename = dictionary_acquired_item_name(file_items[f]);
jf = dictionary_acquired_item_value(file_items[f]);
- if(!jf_is_mine(jf, fqs))
+ if(!jf_is_mine(jf, lqs))
continue;
started_ut = ended_ut;
// do not even try to do the query if we expect it to pass the timeout
- if(ended_ut + max_duration_ut * 3 >= *fqs->stop_monotonic_ut) {
+ if(ended_ut + max_duration_ut * 3 >= *lqs->stop_monotonic_ut) {
partial = true;
status = ND_SD_JOURNAL_TIMED_OUT;
break;
}
- fqs->file_working++;
+ lqs->c.file_working++;
// fqs->cached_count = 0;
size_t fs_calls = fstat_thread_calls;
size_t fs_cached = fstat_thread_cached_responses;
- size_t rows_useful = fqs->rows_useful;
- size_t rows_read = fqs->rows_read;
- size_t bytes_read = fqs->bytes_read;
- size_t matches_setup_ut = fqs->matches_setup_ut;
+ size_t rows_useful = lqs->c.rows_useful;
+ size_t rows_read = lqs->c.rows_read;
+ size_t bytes_read = lqs->c.bytes_read;
+ size_t matches_setup_ut = lqs->c.matches_setup_ut;
- sampling_file_init(fqs, jf);
+ sampling_file_init(lqs, jf);
- ND_SD_JOURNAL_STATUS tmp_status = netdata_systemd_journal_query_one_file(filename, wb, facets, jf, fqs);
+ ND_SD_JOURNAL_STATUS tmp_status = netdata_systemd_journal_query_one_file(filename, wb, facets, jf, lqs);
// nd_log(NDLS_COLLECTORS, NDLP_INFO,
// "JOURNAL ESTIMATION FINAL: '%s' "
@@ -1198,10 +818,10 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
// , fqs->query_file.start_ut, fqs->query_file.stop_ut, fqs->query_file.stop_ut - fqs->query_file.start_ut
// );
- rows_useful = fqs->rows_useful - rows_useful;
- rows_read = fqs->rows_read - rows_read;
- bytes_read = fqs->bytes_read - bytes_read;
- matches_setup_ut = fqs->matches_setup_ut - matches_setup_ut;
+ rows_useful = lqs->c.rows_useful - rows_useful;
+ rows_read = lqs->c.rows_read - rows_read;
+ bytes_read = lqs->c.bytes_read - bytes_read;
+ matches_setup_ut = lqs->c.matches_setup_ut - matches_setup_ut;
fs_calls = fstat_thread_calls - fs_calls;
fs_cached = fstat_thread_cached_responses - fs_cached;
@@ -1215,7 +835,7 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
if(progress_duration_ut >= SYSTEMD_JOURNAL_PROGRESS_EVERY_UT) {
progress_duration_ut = 0;
netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_progress_to_stdout(fqs->transaction, f + 1, files_used);
+ pluginsd_function_progress_to_stdout(lqs->rq.transaction, f + 1, files_used);
netdata_mutex_unlock(&stdout_mutex);
}
@@ -1241,12 +861,12 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
buffer_json_member_add_uint64(wb, "fstat_query_calls", fs_calls);
buffer_json_member_add_uint64(wb, "fstat_query_cached_responses", fs_cached);
- if(fqs->sampling) {
+ if(lqs->rq.sampling) {
buffer_json_member_add_object(wb, "_sampling");
{
- buffer_json_member_add_uint64(wb, "sampled", fqs->samples_per_file.sampled);
- buffer_json_member_add_uint64(wb, "unsampled", fqs->samples_per_file.unsampled);
- buffer_json_member_add_uint64(wb, "estimated", fqs->samples_per_file.estimated);
+ buffer_json_member_add_uint64(wb, "sampled", lqs->c.samples_per_file.sampled);
+ buffer_json_member_add_uint64(wb, "unsampled", lqs->c.samples_per_file.unsampled);
+ buffer_json_member_add_uint64(wb, "estimated", lqs->c.samples_per_file.estimated);
}
buffer_json_object_close(wb); // _sampling
}
@@ -1290,10 +910,8 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
switch (status) {
case ND_SD_JOURNAL_OK:
- if(fqs->if_modified_since && !fqs->rows_useful) {
- buffer_flush(wb);
- return HTTP_RESP_NOT_MODIFIED;
- }
+ if(lqs->rq.if_modified_since && !lqs->c.rows_useful)
+ return rrd_call_function_error(wb, "No additional useful data since the previous call.", HTTP_RESP_NOT_MODIFIED);
break;
case ND_SD_JOURNAL_TIMED_OUT:
@@ -1301,18 +919,19 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
break;
case ND_SD_JOURNAL_CANCELLED:
- buffer_flush(wb);
- return HTTP_RESP_CLIENT_CLOSED_REQUEST;
+ return rrd_call_function_error(wb, "Request cancelled.", HTTP_RESP_CLIENT_CLOSED_REQUEST);
case ND_SD_JOURNAL_NOT_MODIFIED:
- buffer_flush(wb);
- return HTTP_RESP_NOT_MODIFIED;
+ return rrd_call_function_error(wb, "No new data since the previous call.", HTTP_RESP_NOT_MODIFIED);
- default:
case ND_SD_JOURNAL_FAILED_TO_OPEN:
+ return rrd_call_function_error(wb, "Failed to open systemd journal file.", HTTP_RESP_INTERNAL_SERVER_ERROR);
+
case ND_SD_JOURNAL_FAILED_TO_SEEK:
- buffer_flush(wb);
- return HTTP_RESP_INTERNAL_SERVER_ERROR;
+ return rrd_call_function_error(wb, "Failed to seek in systemd journal file.", HTTP_RESP_INTERNAL_SERVER_ERROR);
+
+ default:
+ return rrd_call_function_error(wb, "Unknown status", HTTP_RESP_INTERNAL_SERVER_ERROR);
}
buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
@@ -1320,7 +939,7 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
buffer_json_member_add_string(wb, "type", "table");
// build a message for the query
- if(!fqs->data_only) {
+ if(!lqs->rq.data_only) {
CLEAN_BUFFER *msg = buffer_create(0, NULL);
CLEAN_BUFFER *msg_description = buffer_create(0, NULL);
ND_LOG_FIELD_PRIORITY msg_priority = NDLP_INFO;
@@ -1339,17 +958,17 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
msg_priority = NDLP_WARNING;
}
- if(fqs->samples.estimated || fqs->samples.unsampled) {
- double percent = (double) (fqs->samples.sampled * 100.0 /
- (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled));
+ if(lqs->c.samples.estimated || lqs->c.samples.unsampled) {
+ double percent = (double) (lqs->c.samples.sampled * 100.0 /
+ (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled));
buffer_sprintf(msg, "%.2f%% real data", percent);
buffer_sprintf(msg_description, "ACTUAL DATA: The filters counters reflect %0.2f%% of the data. ", percent);
msg_priority = MIN(msg_priority, NDLP_NOTICE);
}
- if(fqs->samples.unsampled) {
- double percent = (double) (fqs->samples.unsampled * 100.0 /
- (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled));
+ if(lqs->c.samples.unsampled) {
+ double percent = (double) (lqs->c.samples.unsampled * 100.0 /
+ (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled));
buffer_sprintf(msg, ", %.2f%% unsampled", percent);
buffer_sprintf(msg_description
, "UNSAMPLED DATA: %0.2f%% of the events exist and have been counted, but their values have not been evaluated, so they are not included in the filters counters. "
@@ -1357,9 +976,9 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
msg_priority = MIN(msg_priority, NDLP_NOTICE);
}
- if(fqs->samples.estimated) {
- double percent = (double) (fqs->samples.estimated * 100.0 /
- (fqs->samples.estimated + fqs->samples.unsampled + fqs->samples.sampled));
+ if(lqs->c.samples.estimated) {
+ double percent = (double) (lqs->c.samples.estimated * 100.0 /
+ (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled));
buffer_sprintf(msg, ", %.2f%% estimated", percent);
buffer_sprintf(msg_description
, "ESTIMATED DATA: The query selected a large amount of data, so to avoid delaying too much, the presented data are estimated by %0.2f%%. "
@@ -1377,18 +996,19 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
buffer_json_object_close(wb); // message
}
- if(!fqs->data_only) {
+ if(!lqs->rq.data_only) {
buffer_json_member_add_time_t(wb, "update_every", 1);
buffer_json_member_add_string(wb, "help", SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION);
}
- if(!fqs->data_only || fqs->tail)
- buffer_json_member_add_uint64(wb, "last_modified", fqs->last_modified);
+ if(!lqs->rq.data_only || lqs->rq.tail)
+ buffer_json_member_add_uint64(wb, "last_modified", lqs->last_modified);
facets_sort_and_reorder_keys(facets);
facets_report(facets, wb, used_hashes_registry);
- buffer_json_member_add_time_t(wb, "expires", now_realtime_sec() + (fqs->data_only ? 3600 : 0));
+ wb->expires = now_realtime_sec() + (lqs->rq.data_only ? 3600 : 0);
+ buffer_json_member_add_time_t(wb, "expires", wb->expires);
buffer_json_member_add_object(wb, "_fstat_caching");
{
@@ -1397,643 +1017,197 @@ static int netdata_systemd_journal_query(BUFFER *wb, FACETS *facets, FUNCTION_QU
}
buffer_json_object_close(wb); // _fstat_caching
- if(fqs->sampling) {
+ if(lqs->rq.sampling) {
buffer_json_member_add_object(wb, "_sampling");
{
- buffer_json_member_add_uint64(wb, "sampled", fqs->samples.sampled);
- buffer_json_member_add_uint64(wb, "unsampled", fqs->samples.unsampled);
- buffer_json_member_add_uint64(wb, "estimated", fqs->samples.estimated);
+ buffer_json_member_add_uint64(wb, "sampled", lqs->c.samples.sampled);
+ buffer_json_member_add_uint64(wb, "unsampled", lqs->c.samples.unsampled);
+ buffer_json_member_add_uint64(wb, "estimated", lqs->c.samples.estimated);
}
buffer_json_object_close(wb); // _sampling
}
- buffer_json_finalize(wb);
-
- return HTTP_RESP_OK;
+ wb->content_type = CT_APPLICATION_JSON;
+ wb->response_code = HTTP_RESP_OK;
+ return wb->response_code;
}
-static void netdata_systemd_journal_function_help(const char *transaction) {
- BUFFER *wb = buffer_create(0, NULL);
- buffer_sprintf(wb,
- "%s / %s\n"
- "\n"
- "%s\n"
- "\n"
- "The following parameters are supported:\n"
- "\n"
- " "JOURNAL_PARAMETER_HELP"\n"
- " Shows this help message.\n"
- "\n"
- " "JOURNAL_PARAMETER_INFO"\n"
- " Request initial configuration information about the plugin.\n"
- " The key entity returned is the required_params array, which includes\n"
- " all the available systemd journal sources.\n"
- " When `"JOURNAL_PARAMETER_INFO"` is requested, all other parameters are ignored.\n"
- "\n"
- " "JOURNAL_PARAMETER_DATA_ONLY":true or "JOURNAL_PARAMETER_DATA_ONLY":false\n"
- " Quickly respond with data requested, without generating a\n"
- " `histogram`, `facets` counters and `items`.\n"
- "\n"
- " "JOURNAL_PARAMETER_DELTA":true or "JOURNAL_PARAMETER_DELTA":false\n"
- " When doing data only queries, include deltas for histogram, facets and items.\n"
- "\n"
- " "JOURNAL_PARAMETER_TAIL":true or "JOURNAL_PARAMETER_TAIL":false\n"
- " When doing data only queries, respond with the newest messages,\n"
- " and up to the anchor, but calculate deltas (if requested) for\n"
- " the duration [anchor - before].\n"
- "\n"
- " "JOURNAL_PARAMETER_SLICE":true or "JOURNAL_PARAMETER_SLICE":false\n"
- " When it is turned on, the plugin is executing filtering via libsystemd,\n"
- " utilizing all the available indexes of the journal files.\n"
- " When it is off, only the time constraint is handled by libsystemd and\n"
- " all filtering is done by the plugin.\n"
- " The default is: %s\n"
- "\n"
- " "JOURNAL_PARAMETER_SOURCE":SOURCE\n"
- " Query only the specified journal sources.\n"
- " Do an `"JOURNAL_PARAMETER_INFO"` query to find the sources.\n"
- "\n"
- " "JOURNAL_PARAMETER_BEFORE":TIMESTAMP_IN_SECONDS\n"
- " Absolute or relative (to now) timestamp in seconds, to start the query.\n"
- " The query is always executed from the most recent to the oldest log entry.\n"
- " If not given the default is: now.\n"
- "\n"
- " "JOURNAL_PARAMETER_AFTER":TIMESTAMP_IN_SECONDS\n"
- " Absolute or relative (to `before`) timestamp in seconds, to end the query.\n"
- " If not given, the default is %d.\n"
- "\n"
- " "JOURNAL_PARAMETER_LAST":ITEMS\n"
- " The number of items to return.\n"
- " The default is %d.\n"
- "\n"
- " "JOURNAL_PARAMETER_SAMPLING":ITEMS\n"
- " The number of log entries to sample to estimate facets counters and histogram.\n"
- " The default is %d.\n"
- "\n"
- " "JOURNAL_PARAMETER_ANCHOR":TIMESTAMP_IN_MICROSECONDS\n"
- " Return items relative to this timestamp.\n"
- " The exact items to be returned depend on the query `"JOURNAL_PARAMETER_DIRECTION"`.\n"
- "\n"
- " "JOURNAL_PARAMETER_DIRECTION":forward or "JOURNAL_PARAMETER_DIRECTION":backward\n"
- " When set to `backward` (default) the items returned are the newest before the\n"
- " `"JOURNAL_PARAMETER_ANCHOR"`, (or `"JOURNAL_PARAMETER_BEFORE"` if `"JOURNAL_PARAMETER_ANCHOR"` is not set)\n"
- " When set to `forward` the items returned are the oldest after the\n"
- " `"JOURNAL_PARAMETER_ANCHOR"`, (or `"JOURNAL_PARAMETER_AFTER"` if `"JOURNAL_PARAMETER_ANCHOR"` is not set)\n"
- " The default is: %s\n"
- "\n"
- " "JOURNAL_PARAMETER_QUERY":SIMPLE_PATTERN\n"
- " Do a full text search to find the log entries matching the pattern given.\n"
- " The plugin is searching for matches on all fields of the database.\n"
- "\n"
- " "JOURNAL_PARAMETER_IF_MODIFIED_SINCE":TIMESTAMP_IN_MICROSECONDS\n"
- " Each successful response, includes a `last_modified` field.\n"
- " By providing the timestamp to the `"JOURNAL_PARAMETER_IF_MODIFIED_SINCE"` parameter,\n"
- " the plugin will return 200 with a successful response, or 304 if the source has not\n"
- " been modified since that timestamp.\n"
- "\n"
- " "JOURNAL_PARAMETER_HISTOGRAM":facet_id\n"
- " Use the given `facet_id` for the histogram.\n"
- " This parameter is ignored in `"JOURNAL_PARAMETER_DATA_ONLY"` mode.\n"
- "\n"
- " "JOURNAL_PARAMETER_FACETS":facet_id1,facet_id2,facet_id3,...\n"
- " Add the given facets to the list of fields for which analysis is required.\n"
- " The plugin will offer both a histogram and facet value counters for its values.\n"
- " This parameter is ignored in `"JOURNAL_PARAMETER_DATA_ONLY"` mode.\n"
- "\n"
- " facet_id:value_id1,value_id2,value_id3,...\n"
- " Apply filters to the query, based on the facet IDs returned.\n"
- " Each `facet_id` can be given once, but multiple `facet_ids` can be given.\n"
- "\n"
- , program_name
- , SYSTEMD_JOURNAL_FUNCTION_NAME
- , SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION
- , JOURNAL_DEFAULT_SLICE_MODE ? "true" : "false" // slice
- , -SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION
- , SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY
- , SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING
- , JOURNAL_DEFAULT_DIRECTION == FACETS_ANCHOR_DIRECTION_BACKWARD ? "backward" : "forward"
- );
+static void systemd_journal_register_transformations(LOGS_QUERY_STATUS *lqs) {
+ FACETS *facets = lqs->facets;
+ LOGS_QUERY_REQUEST *rq = &lqs->rq;
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
- netdata_mutex_unlock(&stdout_mutex);
+ // ----------------------------------------------------------------------------------------------------------------
+ // register the fields in the order you want them on the dashboard
+
+ facets_register_row_severity(facets, syslog_priority_to_facet_severity, NULL);
- buffer_free(wb);
+ facets_register_key_name(
+ facets, "_HOSTNAME", rq->default_facet | FACET_KEY_OPTION_VISIBLE);
+
+ facets_register_dynamic_key_name(
+ facets, JOURNAL_KEY_ND_JOURNAL_PROCESS,
+ FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_VISIBLE,
+ netdata_systemd_journal_dynamic_row_id, NULL);
+
+ facets_register_key_name(
+ facets, "MESSAGE",
+ FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT |
+ FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS);
+
+ // facets_register_dynamic_key_name(
+ // facets, "MESSAGE",
+ // FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_RICH_TEXT |
+ // FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS,
+ // netdata_systemd_journal_rich_message, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "PRIORITY",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW |
+ FACET_KEY_OPTION_EXPANDED_FILTER,
+ netdata_systemd_journal_transform_priority, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "SYSLOG_FACILITY",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW |
+ FACET_KEY_OPTION_EXPANDED_FILTER,
+ netdata_systemd_journal_transform_syslog_facility, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "ERRNO",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_errno, NULL);
+
+ facets_register_key_name(
+ facets, JOURNAL_KEY_ND_JOURNAL_FILE,
+ FACET_KEY_OPTION_NEVER_FACET);
+
+ facets_register_key_name(
+ facets, "SYSLOG_IDENTIFIER", rq->default_facet);
+
+ facets_register_key_name(
+ facets, "UNIT", rq->default_facet);
+
+ facets_register_key_name(
+ facets, "USER_UNIT", rq->default_facet);
+
+ facets_register_key_name_transformation(
+ facets, "MESSAGE_ID",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW |
+ FACET_KEY_OPTION_EXPANDED_FILTER,
+ netdata_systemd_journal_transform_message_id, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "_BOOT_ID",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_boot_id, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "_SYSTEMD_OWNER_UID",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_uid, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "_UID",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_uid, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "OBJECT_SYSTEMD_OWNER_UID",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_uid, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "OBJECT_UID",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_uid, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "_GID",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_gid, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "OBJECT_GID",
+ rq->default_facet | FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_gid, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "_CAP_EFFECTIVE",
+ FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_cap_effective, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "_AUDIT_LOGINUID",
+ FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_uid, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "OBJECT_AUDIT_LOGINUID",
+ FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_uid, NULL);
+
+ facets_register_key_name_transformation(
+ facets, "_SOURCE_REALTIME_TIMESTAMP",
+ FACET_KEY_OPTION_TRANSFORM_VIEW,
+ netdata_systemd_journal_transform_timestamp_usec, NULL);
}
void function_systemd_journal(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled,
- BUFFER *payload __maybe_unused, HTTP_ACCESS access __maybe_unused,
+ BUFFER *payload, HTTP_ACCESS access __maybe_unused,
const char *source __maybe_unused, void *data __maybe_unused) {
fstat_thread_calls = 0;
fstat_thread_cached_responses = 0;
- BUFFER *wb = buffer_create(0, NULL);
- buffer_flush(wb);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
-
- FUNCTION_QUERY_STATUS tmp_fqs = {
- .cancelled = cancelled,
- .stop_monotonic_ut = stop_monotonic_ut,
- };
- FUNCTION_QUERY_STATUS *fqs = NULL;
-
- FACETS *facets = facets_create(50, FACETS_OPTION_ALL_KEYS_FTS,
- SYSTEMD_ALWAYS_VISIBLE_KEYS,
- SYSTEMD_KEYS_INCLUDED_IN_FACETS,
- SYSTEMD_KEYS_EXCLUDED_FROM_FACETS);
-
- facets_accepted_param(facets, JOURNAL_PARAMETER_INFO);
- facets_accepted_param(facets, JOURNAL_PARAMETER_SOURCE);
- facets_accepted_param(facets, JOURNAL_PARAMETER_AFTER);
- facets_accepted_param(facets, JOURNAL_PARAMETER_BEFORE);
- facets_accepted_param(facets, JOURNAL_PARAMETER_ANCHOR);
- facets_accepted_param(facets, JOURNAL_PARAMETER_DIRECTION);
- facets_accepted_param(facets, JOURNAL_PARAMETER_LAST);
- facets_accepted_param(facets, JOURNAL_PARAMETER_QUERY);
- facets_accepted_param(facets, JOURNAL_PARAMETER_FACETS);
- facets_accepted_param(facets, JOURNAL_PARAMETER_HISTOGRAM);
- facets_accepted_param(facets, JOURNAL_PARAMETER_IF_MODIFIED_SINCE);
- facets_accepted_param(facets, JOURNAL_PARAMETER_DATA_ONLY);
- facets_accepted_param(facets, JOURNAL_PARAMETER_DELTA);
- facets_accepted_param(facets, JOURNAL_PARAMETER_TAIL);
- facets_accepted_param(facets, JOURNAL_PARAMETER_SAMPLING);
-
#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
- facets_accepted_param(facets, JOURNAL_PARAMETER_SLICE);
+ bool have_slice = true;
+#else
+ bool have_slice = false;
#endif // HAVE_SD_JOURNAL_RESTART_FIELDS
- // register the fields in the order you want them on the dashboard
-
- facets_register_row_severity(facets, syslog_priority_to_facet_severity, NULL);
-
- facets_register_key_name(facets, "_HOSTNAME",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_VISIBLE);
-
- facets_register_dynamic_key_name(facets, JOURNAL_KEY_ND_JOURNAL_PROCESS,
- FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_VISIBLE,
- netdata_systemd_journal_dynamic_row_id, NULL);
-
- facets_register_key_name(facets, "MESSAGE",
- FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT |
- FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS);
-
-// facets_register_dynamic_key_name(facets, "MESSAGE",
-// FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_RICH_TEXT |
-// FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS,
-// netdata_systemd_journal_rich_message, NULL);
-
- facets_register_key_name_transformation(facets, "PRIORITY",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW |
- FACET_KEY_OPTION_EXPANDED_FILTER,
- netdata_systemd_journal_transform_priority, NULL);
-
- facets_register_key_name_transformation(facets, "SYSLOG_FACILITY",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW |
- FACET_KEY_OPTION_EXPANDED_FILTER,
- netdata_systemd_journal_transform_syslog_facility, NULL);
-
- facets_register_key_name_transformation(facets, "ERRNO",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_errno, NULL);
-
- facets_register_key_name(facets, JOURNAL_KEY_ND_JOURNAL_FILE,
- FACET_KEY_OPTION_NEVER_FACET);
-
- facets_register_key_name(facets, "SYSLOG_IDENTIFIER",
- FACET_KEY_OPTION_FACET);
-
- facets_register_key_name(facets, "UNIT",
- FACET_KEY_OPTION_FACET);
-
- facets_register_key_name(facets, "USER_UNIT",
- FACET_KEY_OPTION_FACET);
-
- facets_register_key_name_transformation(facets, "MESSAGE_ID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW |
- FACET_KEY_OPTION_EXPANDED_FILTER,
- netdata_systemd_journal_transform_message_id, NULL);
-
- facets_register_key_name_transformation(facets, "_BOOT_ID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_boot_id, NULL);
-
- facets_register_key_name_transformation(facets, "_SYSTEMD_OWNER_UID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "_UID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "OBJECT_SYSTEMD_OWNER_UID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "OBJECT_UID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
+ LOGS_QUERY_STATUS tmp_fqs = {
+ .facets = lqs_facets_create(
+ LQS_DEFAULT_ITEMS_PER_QUERY,
+ FACETS_OPTION_ALL_KEYS_FTS | FACETS_OPTION_HASH_IDS,
+ SYSTEMD_ALWAYS_VISIBLE_KEYS,
+ SYSTEMD_KEYS_INCLUDED_IN_FACETS,
+ SYSTEMD_KEYS_EXCLUDED_FROM_FACETS,
+ have_slice),
- facets_register_key_name_transformation(facets, "_GID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_gid, NULL);
+ .rq = LOGS_QUERY_REQUEST_DEFAULTS(transaction, LQS_DEFAULT_SLICE_MODE, JOURNAL_DEFAULT_DIRECTION),
- facets_register_key_name_transformation(facets, "OBJECT_GID",
- FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_gid, NULL);
-
- facets_register_key_name_transformation(facets, "_CAP_EFFECTIVE",
- FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_cap_effective, NULL);
-
- facets_register_key_name_transformation(facets, "_AUDIT_LOGINUID",
- FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
-
- facets_register_key_name_transformation(facets, "OBJECT_AUDIT_LOGINUID",
- FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_uid, NULL);
+ .cancelled = cancelled,
+ .stop_monotonic_ut = stop_monotonic_ut,
+ };
+ LOGS_QUERY_STATUS *lqs = &tmp_fqs;
- facets_register_key_name_transformation(facets, "_SOURCE_REALTIME_TIMESTAMP",
- FACET_KEY_OPTION_TRANSFORM_VIEW,
- netdata_systemd_journal_transform_timestamp_usec, NULL);
+ CLEAN_BUFFER *wb = lqs_create_output_buffer();
// ------------------------------------------------------------------------
// parse the parameters
- bool info = false, data_only = false, slice = JOURNAL_DEFAULT_SLICE_MODE, delta = false, tail = false;
- time_t after_s = 0, before_s = 0;
- usec_t anchor = 0;
- usec_t if_modified_since = 0;
- size_t last = 0;
- FACETS_ANCHOR_DIRECTION direction = JOURNAL_DEFAULT_DIRECTION;
- const char *query = NULL;
- const char *chart = NULL;
- SIMPLE_PATTERN *sources = NULL;
- SD_JOURNAL_FILE_SOURCE_TYPE source_type = SDJF_ALL;
- size_t filters = 0;
- size_t sampling = SYSTEMD_JOURNAL_DEFAULT_ITEMS_SAMPLING;
-
- buffer_json_member_add_object(wb, "_request");
-
- char *words[SYSTEMD_JOURNAL_MAX_PARAMS] = { NULL };
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_JOURNAL_MAX_PARAMS);
- for(int i = 1; i < SYSTEMD_JOURNAL_MAX_PARAMS ;i++) {
- char *keyword = get_word(words, num_words, i);
- if(!keyword) break;
-
- if(strcmp(keyword, JOURNAL_PARAMETER_HELP) == 0) {
- netdata_systemd_journal_function_help(transaction);
- goto cleanup;
- }
- else if(strcmp(keyword, JOURNAL_PARAMETER_INFO) == 0) {
- info = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_DELTA ":", sizeof(JOURNAL_PARAMETER_DELTA ":") - 1) == 0) {
- char *v = &keyword[sizeof(JOURNAL_PARAMETER_DELTA ":") - 1];
-
- if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0)
- delta = false;
- else
- delta = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_TAIL ":", sizeof(JOURNAL_PARAMETER_TAIL ":") - 1) == 0) {
- char *v = &keyword[sizeof(JOURNAL_PARAMETER_TAIL ":") - 1];
-
- if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0)
- tail = false;
- else
- tail = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_SAMPLING ":", sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1) == 0) {
- sampling = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_SAMPLING ":") - 1]);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_DATA_ONLY ":", sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1) == 0) {
- char *v = &keyword[sizeof(JOURNAL_PARAMETER_DATA_ONLY ":") - 1];
-
- if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0)
- data_only = false;
- else
- data_only = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_SLICE ":", sizeof(JOURNAL_PARAMETER_SLICE ":") - 1) == 0) {
- char *v = &keyword[sizeof(JOURNAL_PARAMETER_SLICE ":") - 1];
-
- if(strcmp(v, "false") == 0 || strcmp(v, "no") == 0 || strcmp(v, "0") == 0)
- slice = false;
- else
- slice = true;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_SOURCE ":", sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1) == 0) {
- const char *value = &keyword[sizeof(JOURNAL_PARAMETER_SOURCE ":") - 1];
-
- buffer_json_member_add_array(wb, JOURNAL_PARAMETER_SOURCE);
-
- BUFFER *sources_list = buffer_create(0, NULL);
-
- source_type = SDJF_NONE;
- while(value) {
- char *sep = strchr(value, ',');
- if(sep)
- *sep++ = '\0';
+ if(lqs_request_parse_and_validate(lqs, wb, function, payload, have_slice, "PRIORITY")) {
+ systemd_journal_register_transformations(lqs);
- buffer_json_add_array_item_string(wb, value);
+ // ------------------------------------------------------------------------
+ // add versions to the response
- if(strcmp(value, SDJF_SOURCE_ALL_NAME) == 0) {
- source_type |= SDJF_ALL;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_LOCAL_NAME) == 0) {
- source_type |= SDJF_LOCAL_ALL;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_REMOTES_NAME) == 0) {
- source_type |= SDJF_REMOTE_ALL;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_NAMESPACES_NAME) == 0) {
- source_type |= SDJF_LOCAL_NAMESPACE;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_LOCAL_SYSTEM_NAME) == 0) {
- source_type |= SDJF_LOCAL_SYSTEM;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_LOCAL_USERS_NAME) == 0) {
- source_type |= SDJF_LOCAL_USER;
- value = NULL;
- }
- else if(strcmp(value, SDJF_SOURCE_LOCAL_OTHER_NAME) == 0) {
- source_type |= SDJF_LOCAL_OTHER;
- value = NULL;
- }
- else {
- // else, match the source, whatever it is
- if(buffer_strlen(sources_list))
- buffer_strcat(sources_list, ",");
-
- buffer_strcat(sources_list, value);
- }
+ buffer_json_journal_versions(wb);
- value = sep;
- }
-
- if(buffer_strlen(sources_list)) {
- simple_pattern_free(sources);
- sources = simple_pattern_create(buffer_tostring(sources_list), ",", SIMPLE_PATTERN_EXACT, false);
- }
-
- buffer_free(sources_list);
-
- buffer_json_array_close(wb); // source
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_AFTER ":", sizeof(JOURNAL_PARAMETER_AFTER ":") - 1) == 0) {
- after_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_AFTER ":") - 1]);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_BEFORE ":", sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1) == 0) {
- before_s = str2l(&keyword[sizeof(JOURNAL_PARAMETER_BEFORE ":") - 1]);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":", sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1) == 0) {
- if_modified_since = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_IF_MODIFIED_SINCE ":") - 1], NULL);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_ANCHOR ":", sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1) == 0) {
- anchor = str2ull(&keyword[sizeof(JOURNAL_PARAMETER_ANCHOR ":") - 1], NULL);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_DIRECTION ":", sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1) == 0) {
- direction = strcasecmp(&keyword[sizeof(JOURNAL_PARAMETER_DIRECTION ":") - 1], "forward") == 0 ? FACETS_ANCHOR_DIRECTION_FORWARD : FACETS_ANCHOR_DIRECTION_BACKWARD;
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_LAST ":", sizeof(JOURNAL_PARAMETER_LAST ":") - 1) == 0) {
- last = str2ul(&keyword[sizeof(JOURNAL_PARAMETER_LAST ":") - 1]);
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_QUERY ":", sizeof(JOURNAL_PARAMETER_QUERY ":") - 1) == 0) {
- query= &keyword[sizeof(JOURNAL_PARAMETER_QUERY ":") - 1];
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_HISTOGRAM ":", sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1) == 0) {
- chart = &keyword[sizeof(JOURNAL_PARAMETER_HISTOGRAM ":") - 1];
- }
- else if(strncmp(keyword, JOURNAL_PARAMETER_FACETS ":", sizeof(JOURNAL_PARAMETER_FACETS ":") - 1) == 0) {
- char *value = &keyword[sizeof(JOURNAL_PARAMETER_FACETS ":") - 1];
- if(*value) {
- buffer_json_member_add_array(wb, JOURNAL_PARAMETER_FACETS);
+ // ------------------------------------------------------------------------
+ // run the request
- while(value) {
- char *sep = strchr(value, ',');
- if(sep)
- *sep++ = '\0';
-
- facets_register_facet_id(facets, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER);
- buffer_json_add_array_item_string(wb, value);
-
- value = sep;
- }
-
- buffer_json_array_close(wb); // JOURNAL_PARAMETER_FACETS
- }
- }
+ if (lqs->rq.info)
+ lqs_info_response(wb, lqs->facets);
else {
- char *value = strchr(keyword, ':');
- if(value) {
- *value++ = '\0';
-
- buffer_json_member_add_array(wb, keyword);
-
- while(value) {
- char *sep = strchr(value, ',');
- if(sep)
- *sep++ = '\0';
-
- facets_register_facet_id_filter(facets, keyword, value, FACET_KEY_OPTION_FACET|FACET_KEY_OPTION_FTS|FACET_KEY_OPTION_REORDER);
- buffer_json_add_array_item_string(wb, value);
- filters++;
-
- value = sep;
- }
-
- buffer_json_array_close(wb); // keyword
- }
+ netdata_systemd_journal_query(wb, lqs);
+ if (wb->response_code == HTTP_RESP_OK)
+ buffer_json_finalize(wb);
}
}
- // ------------------------------------------------------------------------
- // put this request into the progress db
-
- fqs = &tmp_fqs;
-
- // ------------------------------------------------------------------------
- // validate parameters
-
- time_t now_s = now_realtime_sec();
- time_t expires = now_s + 1;
-
- if(!after_s && !before_s) {
- before_s = now_s;
- after_s = before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION;
- }
- else
- rrdr_relative_window_to_absolute(&after_s, &before_s, now_s);
-
- if(after_s > before_s) {
- time_t tmp = after_s;
- after_s = before_s;
- before_s = tmp;
- }
-
- if(after_s == before_s)
- after_s = before_s - SYSTEMD_JOURNAL_DEFAULT_QUERY_DURATION;
-
- if(!last)
- last = SYSTEMD_JOURNAL_DEFAULT_ITEMS_PER_QUERY;
-
-
- // ------------------------------------------------------------------------
- // set query time-frame, anchors and direction
-
- fqs->transaction = transaction;
- fqs->after_ut = after_s * USEC_PER_SEC;
- fqs->before_ut = (before_s * USEC_PER_SEC) + USEC_PER_SEC - 1;
- fqs->if_modified_since = if_modified_since;
- fqs->data_only = data_only;
- fqs->delta = (fqs->data_only) ? delta : false;
- fqs->tail = (fqs->data_only && fqs->if_modified_since) ? tail : false;
- fqs->sources = sources;
- fqs->source_type = source_type;
- fqs->entries = last;
- fqs->last_modified = 0;
- fqs->filters = filters;
- fqs->query = (query && *query) ? query : NULL;
- fqs->histogram = (chart && *chart) ? chart : NULL;
- fqs->direction = direction;
- fqs->anchor.start_ut = anchor;
- fqs->anchor.stop_ut = 0;
- fqs->sampling = sampling;
-
- if(fqs->anchor.start_ut && fqs->tail) {
- // a tail request
- // we need the top X entries from BEFORE
- // but, we need to calculate the facets and the
- // histogram up to the anchor
- fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD;
- fqs->anchor.start_ut = 0;
- fqs->anchor.stop_ut = anchor;
- }
-
- if(anchor && anchor < fqs->after_ut) {
- log_fqs(fqs, "received anchor is too small for query timeframe, ignoring anchor");
- anchor = 0;
- fqs->anchor.start_ut = 0;
- fqs->anchor.stop_ut = 0;
- fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD;
- }
- else if(anchor > fqs->before_ut) {
- log_fqs(fqs, "received anchor is too big for query timeframe, ignoring anchor");
- anchor = 0;
- fqs->anchor.start_ut = 0;
- fqs->anchor.stop_ut = 0;
- fqs->direction = direction = FACETS_ANCHOR_DIRECTION_BACKWARD;
- }
-
- facets_set_anchor(facets, fqs->anchor.start_ut, fqs->anchor.stop_ut, fqs->direction);
-
- facets_set_additional_options(facets,
- ((fqs->data_only) ? FACETS_OPTION_DATA_ONLY : 0) |
- ((fqs->delta) ? FACETS_OPTION_SHOW_DELTAS : 0));
-
- // ------------------------------------------------------------------------
- // set the rest of the query parameters
-
-
- facets_set_items(facets, fqs->entries);
- facets_set_query(facets, fqs->query);
-
-#ifdef HAVE_SD_JOURNAL_RESTART_FIELDS
- fqs->slice = slice;
- if(slice)
- facets_enable_slice_mode(facets);
-#else
- fqs->slice = false;
-#endif
-
- if(fqs->histogram)
- facets_set_timeframe_and_histogram_by_id(facets, fqs->histogram, fqs->after_ut, fqs->before_ut);
- else
- facets_set_timeframe_and_histogram_by_name(facets, "PRIORITY", fqs->after_ut, fqs->before_ut);
-
-
- // ------------------------------------------------------------------------
- // complete the request object
-
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_INFO, false);
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_SLICE, fqs->slice);
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_DATA_ONLY, fqs->data_only);
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_DELTA, fqs->delta);
- buffer_json_member_add_boolean(wb, JOURNAL_PARAMETER_TAIL, fqs->tail);
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_SAMPLING, fqs->sampling);
- buffer_json_member_add_uint64(wb, "source_type", fqs->source_type);
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_AFTER, fqs->after_ut / USEC_PER_SEC);
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_BEFORE, fqs->before_ut / USEC_PER_SEC);
- buffer_json_member_add_uint64(wb, "if_modified_since", fqs->if_modified_since);
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_ANCHOR, anchor);
- buffer_json_member_add_string(wb, JOURNAL_PARAMETER_DIRECTION, fqs->direction == FACETS_ANCHOR_DIRECTION_FORWARD ? "forward" : "backward");
- buffer_json_member_add_uint64(wb, JOURNAL_PARAMETER_LAST, fqs->entries);
- buffer_json_member_add_string(wb, JOURNAL_PARAMETER_QUERY, fqs->query);
- buffer_json_member_add_string(wb, JOURNAL_PARAMETER_HISTOGRAM, fqs->histogram);
- buffer_json_object_close(wb); // request
-
- buffer_json_journal_versions(wb);
-
- // ------------------------------------------------------------------------
- // run the request
-
- int response;
-
- if(info) {
- facets_accepted_parameters_to_json_array(facets, wb, false);
- buffer_json_member_add_array(wb, "required_params");
- {
- buffer_json_add_array_item_object(wb);
- {
- buffer_json_member_add_string(wb, "id", "source");
- buffer_json_member_add_string(wb, "name", "source");
- buffer_json_member_add_string(wb, "help", "Select the SystemD Journal source to query");
- buffer_json_member_add_string(wb, "type", "multiselect");
- buffer_json_member_add_array(wb, "options");
- {
- available_journal_file_sources_to_json_array(wb);
- }
- buffer_json_array_close(wb); // options array
- }
- buffer_json_object_close(wb); // required params object
- }
- buffer_json_array_close(wb); // required_params array
-
- facets_table_config(wb);
-
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_string(wb, "help", SYSTEMD_JOURNAL_FUNCTION_DESCRIPTION);
- buffer_json_finalize(wb);
- response = HTTP_RESP_OK;
- goto output;
- }
-
- response = netdata_systemd_journal_query(wb, facets, fqs);
-
- // ------------------------------------------------------------------------
- // handle error response
-
- if(response != HTTP_RESP_OK) {
- netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_json_error_to_stdout(transaction, response, "failed");
- netdata_mutex_unlock(&stdout_mutex);
- goto cleanup;
- }
-
-output:
netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, response, "application/json", expires, wb);
+ pluginsd_function_result_to_stdout(transaction, wb);
netdata_mutex_unlock(&stdout_mutex);
-cleanup:
- simple_pattern_free(sources);
- facets_destroy(facets);
- buffer_free(wb);
+ lqs_cleanup(lqs);
}
diff --git a/src/collectors/systemd-journal.plugin/systemd-main.c b/src/collectors/systemd-journal.plugin/systemd-main.c
index e3afe4e86..e7d79c413 100644
--- a/src/collectors/systemd-journal.plugin/systemd-main.c
+++ b/src/collectors/systemd-journal.plugin/systemd-main.c
@@ -18,7 +18,6 @@ static bool journal_data_directories_exist() {
}
int main(int argc __maybe_unused, char **argv __maybe_unused) {
- clocks_init();
nd_thread_tag_set("sd-jrnl.plugin");
nd_log_initialize_for_external_plugins("systemd-journal.plugin");
@@ -46,7 +45,8 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) {
bool cancelled = false;
usec_t stop_monotonic_ut = now_monotonic_usec() + 600 * USEC_PER_SEC;
- char buf[] = "systemd-journal after:-8640000 before:0 direction:backward last:200 data_only:false slice:true source:all";
+ // char buf[] = "systemd-journal after:1726573205 before:1726574105 last:200 facets:F9q1S4.MEeL,DHKucpqUoe1,MewN7JHJ.3X,LKWfxQdCIoc,IjWzTvQ9.4t,O6O.cgYOhns,KmQ1KSeTSfO,IIsT7Ytfxy6,EQD6.NflJQq,I6rMJzShJIE,DxoIlg6RTuM,AU4H2NMVPXJ,H4mcdIPho07,EDYhj5U8330,DloDtGMQHje,JHSbsQ2fXqr,AIRrOu._40Z,NFZXv8AEpS_,Iiic3t4NuxV,F2YCtRNSfDv,GOUMAmZiRrq,O0VYoHcyq49,FDQoaBH15Bp,ClBB5dSGmCc,GTwmQptJYkk,BWH4O3GPNSL,APv6JsKkF9X,IAURKhjtcRF,Jw1dz4fJmFr slice:true source:all";
+ char buf[] = "systemd-journal after:-8640000 before:0 direction:backward last:200 data_only:false slice:true facets: source:all";
// char buf[] = "systemd-journal after:1695332964 before:1695937764 direction:backward last:100 slice:true source:all DHKucpqUoe1:PtVoyIuX.MU";
// char buf[] = "systemd-journal after:1694511062 before:1694514662 anchor:1694514122024403";
function_systemd_journal("123", buf, &stop_monotonic_ut, &cancelled,
@@ -113,13 +113,12 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) {
// ------------------------------------------------------------------------
- usec_t step_ut = 100 * USEC_PER_MS;
usec_t send_newline_ut = 0;
usec_t since_last_scan_ut = SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC * 2; // something big to trigger scanning at start
- bool tty = isatty(fileno(stdout)) == 1;
+ const bool tty = isatty(fileno(stdout)) == 1;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
while(!plugin_should_exit) {
if(since_last_scan_ut > SYSTEMD_JOURNAL_ALL_FILES_SCAN_EVERY_USEC) {
@@ -127,7 +126,7 @@ int main(int argc __maybe_unused, char **argv __maybe_unused) {
since_last_scan_ut = 0;
}
- usec_t dt_ut = heartbeat_next(&hb, step_ut);
+ usec_t dt_ut = heartbeat_next(&hb);
since_last_scan_ut += dt_ut;
send_newline_ut += dt_ut;
diff --git a/src/collectors/systemd-journal.plugin/systemd-units.c b/src/collectors/systemd-journal.plugin/systemd-units.c
index 0e096dae1..a5f670d68 100644
--- a/src/collectors/systemd-journal.plugin/systemd-units.c
+++ b/src/collectors/systemd-journal.plugin/systemd-units.c
@@ -1153,7 +1153,10 @@ static void netdata_systemd_units_function_help(const char *transaction) {
);
netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
+ wb->response_code = HTTP_RESP_OK;
+ wb->content_type = CT_TEXT_PLAIN;
+ wb->expires = now_realtime_sec() + 3600;
+ pluginsd_function_result_to_stdout(transaction, wb);
netdata_mutex_unlock(&stdout_mutex);
buffer_free(wb);
@@ -1169,7 +1172,10 @@ static void netdata_systemd_units_function_info(const char *transaction) {
buffer_json_finalize(wb);
netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
+ wb->response_code = HTTP_RESP_OK;
+ wb->content_type = CT_TEXT_PLAIN;
+ wb->expires = now_realtime_sec() + 3600;
+ pluginsd_function_result_to_stdout(transaction, wb);
netdata_mutex_unlock(&stdout_mutex);
buffer_free(wb);
@@ -1601,7 +1607,7 @@ void function_systemd_units(const char *transaction, char *function,
BUFFER *payload __maybe_unused, HTTP_ACCESS access __maybe_unused,
const char *source __maybe_unused, void *data __maybe_unused) {
char *words[SYSTEMD_UNITS_MAX_PARAMS] = { NULL };
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, SYSTEMD_UNITS_MAX_PARAMS);
+ size_t num_words = quoted_strings_splitter_whitespace(function, words, SYSTEMD_UNITS_MAX_PARAMS);
for(int i = 1; i < SYSTEMD_UNITS_MAX_PARAMS ;i++) {
char *keyword = get_word(words, num_words, i);
if(!keyword) break;
@@ -1958,7 +1964,10 @@ void function_systemd_units(const char *transaction, char *function,
buffer_json_finalize(wb);
netdata_mutex_lock(&stdout_mutex);
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", now_realtime_sec() + 1, wb);
+ wb->response_code = HTTP_RESP_OK;
+ wb->content_type = CT_APPLICATION_JSON;
+ wb->expires = now_realtime_sec() + 1;
+ pluginsd_function_result_to_stdout(transaction, wb);
netdata_mutex_unlock(&stdout_mutex);
buffer_free(wb);
diff --git a/src/collectors/tc.plugin/integrations/tc_qos_classes.md b/src/collectors/tc.plugin/integrations/tc_qos_classes.md
index 2928110b3..204fa7de6 100644
--- a/src/collectors/tc.plugin/integrations/tc_qos_classes.md
+++ b/src/collectors/tc.plugin/integrations/tc_qos_classes.md
@@ -93,7 +93,7 @@ There are no alerts configured by default for this integration.
In order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:
-```conf
+```text
tc_show="class"
```
@@ -116,8 +116,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
@@ -144,7 +144,7 @@ A basic example configuration using classes defined in `/etc/iproute2/tc_cls`.
An example of class IDs mapped to names in that file can be:
-```conf
+```text
2:1 Standard
2:8 LowPriorityData
2:10 HighThroughputData
diff --git a/src/collectors/tc.plugin/metadata.yaml b/src/collectors/tc.plugin/metadata.yaml
index f4039a8c5..c40b5aa01 100644
--- a/src/collectors/tc.plugin/metadata.yaml
+++ b/src/collectors/tc.plugin/metadata.yaml
@@ -41,7 +41,7 @@ modules:
description: |
In order to view tc classes, you need to create the file `/etc/netdata/tc-qos-helper.conf` with content:
- ```conf
+ ```text
tc_show="class"
```
configuration:
@@ -74,7 +74,7 @@ modules:
An example of class IDs mapped to names in that file can be:
- ```conf
+ ```text
2:1 Standard
2:8 LowPriorityData
2:10 HighThroughputData
diff --git a/src/collectors/tc.plugin/plugin_tc.c b/src/collectors/tc.plugin/plugin_tc.c
index da2a39194..7102e216d 100644
--- a/src/collectors/tc.plugin/plugin_tc.c
+++ b/src/collectors/tc.plugin/plugin_tc.c
@@ -912,7 +912,7 @@ void *tc_main(void *ptr) {
uint32_t first_hash;
snprintfz(command, TC_LINE_MAX, "%s/tc-qos-helper.sh", netdata_configured_primary_plugins_dir);
- char *tc_script = config_get("plugin:tc", "script to run to get tc values", command);
+ const char *tc_script = config_get("plugin:tc", "script to run to get tc values", command);
while(service_running(SERVICE_COLLECTORS)) {
struct tc_device *device = NULL;
@@ -928,7 +928,7 @@ void *tc_main(void *ptr) {
}
char buffer[TC_LINE_MAX+1] = "";
- while(fgets(buffer, TC_LINE_MAX, tc_child_instance->child_stdout_fp) != NULL) {
+ while(fgets(buffer, TC_LINE_MAX, spawn_popen_stdout(tc_child_instance)) != NULL) {
if(unlikely(!service_running(SERVICE_COLLECTORS))) break;
buffer[TC_LINE_MAX] = '\0';
diff --git a/src/collectors/timex.plugin/integrations/timex.md b/src/collectors/timex.plugin/integrations/timex.md
index 98bcbe10b..5d758857c 100644
--- a/src/collectors/timex.plugin/integrations/timex.md
+++ b/src/collectors/timex.plugin/integrations/timex.md
@@ -102,8 +102,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/timex.plugin/plugin_timex.c b/src/collectors/timex.plugin/plugin_timex.c
index 6e200c425..381079cf4 100644
--- a/src/collectors/timex.plugin/plugin_timex.c
+++ b/src/collectors/timex.plugin/plugin_timex.c
@@ -50,9 +50,11 @@ void *timex_main(void *ptr)
worker_register("TIMEX");
worker_register_job_name(0, "clock check");
- int update_every = (int)config_get_number(CONFIG_SECTION_TIMEX, "update every", 10);
- if (update_every < localhost->rrd_update_every)
+ int update_every = (int)config_get_duration_seconds(CONFIG_SECTION_TIMEX, "update every", 10);
+ if (update_every < localhost->rrd_update_every) {
update_every = localhost->rrd_update_every;
+ config_set_duration_seconds(CONFIG_SECTION_TIMEX, "update every", update_every);
+ }
int do_sync = config_get_boolean(CONFIG_SECTION_TIMEX, "clock synchronization state", CONFIG_BOOLEAN_YES);
int do_offset = config_get_boolean(CONFIG_SECTION_TIMEX, "time offset", CONFIG_BOOLEAN_YES);
@@ -65,10 +67,10 @@ void *timex_main(void *ptr)
usec_t step = update_every * USEC_PER_SEC;
usec_t real_step = USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, USEC_PER_SEC);
while (service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- heartbeat_next(&hb, USEC_PER_SEC);
+ heartbeat_next(&hb);
if (real_step < step) {
real_step += USEC_PER_SEC;
diff --git a/src/collectors/plugins.d/local_listeners.c b/src/collectors/utils/local_listeners.c
index 2a729b34d..a2e8968ff 100644
--- a/src/collectors/plugins.d/local_listeners.c
+++ b/src/collectors/utils/local_listeners.c
@@ -1,41 +1,14 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "libnetdata/libnetdata.h"
-#include "libnetdata/maps/local-sockets.h"
+#include "libnetdata/local-sockets/local-sockets.h"
#include "libnetdata/required_dummies.h"
// --------------------------------------------------------------------------------------------------------------------
-static const char *protocol_name(LOCAL_SOCKET *n) {
- if(n->local.family == AF_INET) {
- if(n->local.protocol == IPPROTO_TCP)
- return "TCP";
- else if(n->local.protocol == IPPROTO_UDP)
- return "UDP";
- else
- return "UNKNOWN_IPV4";
- }
- else if(is_local_socket_ipv46(n)) {
- if (n->local.protocol == IPPROTO_TCP)
- return "TCP46";
- else if(n->local.protocol == IPPROTO_UDP)
- return "UDP46";
- else
- return "UNKNOWN_IPV46";
- }
- else if(n->local.family == AF_INET6) {
- if (n->local.protocol == IPPROTO_TCP)
- return "TCP6";
- else if(n->local.protocol == IPPROTO_UDP)
- return "UDP6";
- else
- return "UNKNOWN_IPV6";
- }
- else
- return "UNKNOWN";
-}
+static void print_local_listeners(LS_STATE *ls __maybe_unused, const LOCAL_SOCKET *nn, void *data __maybe_unused) {
+ LOCAL_SOCKET *n = (LOCAL_SOCKET *)nn;
-static void print_local_listeners(LS_STATE *ls __maybe_unused, LOCAL_SOCKET *n, void *data __maybe_unused) {
char local_address[INET6_ADDRSTRLEN];
char remote_address[INET6_ADDRSTRLEN];
@@ -52,36 +25,7 @@ static void print_local_listeners(LS_STATE *ls __maybe_unused, LOCAL_SOCKET *n,
ipv6_address_to_txt(&n->remote.ip.ipv6, remote_address);
}
- printf("%s|%s|%u|%s\n", protocol_name(n), local_address, n->local.port, string2str(n->cmdline));
-}
-
-static void print_local_listeners_debug(LS_STATE *ls __maybe_unused, LOCAL_SOCKET *n, void *data __maybe_unused) {
- char local_address[INET6_ADDRSTRLEN];
- char remote_address[INET6_ADDRSTRLEN];
-
- if(n->local.family == AF_INET) {
- ipv4_address_to_txt(n->local.ip.ipv4, local_address);
- ipv4_address_to_txt(n->remote.ip.ipv4, remote_address);
- }
- else if(n->local.family == AF_INET6) {
- ipv6_address_to_txt(&n->local.ip.ipv6, local_address);
- ipv6_address_to_txt(&n->remote.ip.ipv6, remote_address);
- }
-
- printf("%s, direction=%s%s%s%s%s pid=%d, state=0x%0x, ns=%"PRIu64", local=%s[:%u], remote=%s[:%u], uid=%u, comm=%s\n",
- protocol_name(n),
- (n->direction & SOCKET_DIRECTION_LISTEN) ? "LISTEN," : "",
- (n->direction & SOCKET_DIRECTION_INBOUND) ? "INBOUND," : "",
- (n->direction & SOCKET_DIRECTION_OUTBOUND) ? "OUTBOUND," : "",
- (n->direction & (SOCKET_DIRECTION_LOCAL_INBOUND|SOCKET_DIRECTION_LOCAL_OUTBOUND)) ? "LOCAL," : "",
- (n->direction == 0) ? "NONE," : "",
- n->pid,
- (unsigned int)n->state,
- n->net_ns_inode,
- local_address, n->local.port,
- remote_address, n->remote.port,
- n->uid,
- n->comm);
+ printf("%s|%s|%u|%s\n", local_sockets_protocol_name(n), local_address, n->local.port, string2str(n->cmdline));
}
// --------------------------------------------------------------------------------------------------------------------
@@ -106,6 +50,8 @@ int main(int argc, char **argv) {
.comm = false,
.namespaces = true,
.tcp_info = false,
+ .no_mnl = false,
+ .report = false,
.max_errors = 10,
.max_concurrent_namespaces = 10,
@@ -153,11 +99,16 @@ int main(int argc, char **argv) {
"\n"
" Current options:\n"
"\n"
- " %s %s %s %s %s %s %s %s %s\n"
+ " %s %s %s %s %s %s %s %s %s %s %s %s\n"
"\n"
" Option 'debug' enables all sources and all directions and provides\n"
" a full dump of current sockets.\n"
"\n"
+ " Option 'report' reports timings per step while collecting and processing\n"
+ " system information.\n"
+ "\n"
+ " Option 'procfile' uses procfile to read proc files, instead of getline().\n"
+ "\n"
" DIRECTION DETECTION\n"
" The program detects the direction of the sockets using these rules:\n"
"\n"
@@ -203,6 +154,9 @@ int main(int argc, char **argv) {
, ls.config.inbound ? "inbound" : "no-inbound"
, ls.config.outbound ? "outbound" : "no-outbound"
, ls.config.namespaces ? "namespaces" : "no-namespaces"
+ , ls.config.no_mnl ? "no-mnl" : "mnl"
+ , ls.config.procfile ? "procfile" : "no-procfile"
+ , ls.config.report ? "report" : "no-report"
);
exit(1);
}
@@ -228,8 +182,9 @@ int main(int argc, char **argv) {
ls.config.namespaces = true;
ls.config.tcp_info = true;
ls.config.uid = true;
+ ls.config.procfile = false;
ls.config.max_errors = SIZE_MAX;
- ls.config.cb = print_local_listeners_debug;
+ ls.config.cb = local_listeners_print_socket;
debug = true;
}
@@ -285,22 +240,39 @@ int main(int argc, char **argv) {
ls.config.namespaces = positive;
// fprintf(stderr, "%s namespaces\n", positive ? "enabling" : "disabling");
}
+ else if (strcmp("mnl", s) == 0) {
+ ls.config.no_mnl = !positive;
+ // fprintf(stderr, "%s mnl\n", positive ? "enabling" : "disabling");
+ }
+ else if (strcmp("procfile", s) == 0) {
+ ls.config.procfile = positive;
+ // fprintf(stderr, "%s procfile\n", positive ? "enabling" : "disabling");
+ }
+ else if (strcmp("report", s) == 0) {
+ ls.config.report = positive;
+ // fprintf(stderr, "%s report\n", positive ? "enabling" : "disabling");
+ }
else {
fprintf(stderr, "Unknown parameter %s\n", s);
exit(1);
}
}
+#if defined(LOCAL_SOCKETS_USE_SETNS)
SPAWN_SERVER *spawn_server = spawn_server_create(SPAWN_SERVER_OPTION_CALLBACK, NULL, local_sockets_spawn_server_callback, argc, (const char **)argv);
if(spawn_server == NULL) {
fprintf(stderr, "Cannot create spawn server.\n");
exit(1);
}
+
ls.spawn_server = spawn_server;
+#endif
local_sockets_process(&ls);
+#if defined(LOCAL_SOCKETS_USE_SETNS)
spawn_server_destroy(spawn_server);
+#endif
getrusage(RUSAGE_SELF, &ended);
@@ -312,5 +284,56 @@ int main(int argc, char **argv) {
fprintf(stderr, "CPU Usage %llu user, %llu system, %llu total, %zu namespaces, %zu nl requests (without namespaces)\n", user, system, total, ls.stats.namespaces_found, ls.stats.mnl_sends);
}
+ if(ls.config.report) {
+ fprintf(stderr, "\nTIMINGS REPORT:\n");
+ char buf[100];
+ usec_t total_ut = 0;
+ for(size_t i = 0; i < _countof(ls.timings) ;i++) {
+ if (!ls.timings[i].end_ut) continue;
+ usec_t dt_ut = ls.timings[i].end_ut - ls.timings[i].start_ut;
+ total_ut += dt_ut;
+ }
+
+ for(size_t i = 0; i < _countof(ls.timings) ;i++) {
+ if(!ls.timings[i].end_ut) continue;
+ usec_t dt_ut = ls.timings[i].end_ut - ls.timings[i].start_ut;
+ double percent = (100.0 * (double)dt_ut) / (double)total_ut;
+ duration_snprintf(buf, sizeof(buf), (int64_t)dt_ut, "us", true);
+ fprintf(stderr, "%20s: %6.2f%% %s\n", ls.timings[i].name, percent, buf);
+ }
+
+ duration_snprintf(buf, sizeof(buf), (int64_t)total_ut, "us", true);
+ fprintf(stderr, "%20s: %6.2f%% %s\n", "TOTAL", 100.0, buf);
+
+ fprintf(stderr, "\n");
+ fprintf(stderr, "Namespaces [ found: %zu, absent: %zu, invalid: %zu ]\n"
+#if defined(LOCAL_SOCKETS_USE_SETNS)
+ " \\_ forks [ tried: %zu, failed: %zu, unresponsive: %zu ]\n"
+ " \\_ sockets [ new: %zu, existing: %zu ]\n"
+#endif
+ , ls.stats.namespaces_found, ls.stats.namespaces_absent, ls.stats.namespaces_invalid
+#if defined(LOCAL_SOCKETS_USE_SETNS)
+ , ls.stats.namespaces_forks_attempted, ls.stats.namespaces_forks_failed, ls.stats.namespaces_forks_unresponsive
+ , ls.stats.namespaces_sockets_new, ls.stats.namespaces_sockets_existing
+#endif
+ );
+
+ fprintf(stderr, "\n");
+ fprintf(stderr, "Sockets [ found: %zu ]\n",
+ ls.stats.sockets_added);
+
+ fprintf(stderr, "\n");
+ fprintf(stderr, "Main Procfile [ opens: %zu, reads: %zu, resizes: %zu, memory: %zu ]\n"
+ " \\_ reads [ total bytes read: %zu, average read size: %zu, max read size: %zu ]\n"
+ " \\_ max [ max file size: %zu, max lines: %zu, max words: %zu ]\n",
+ ls.stats.ff.opens, ls.stats.ff.reads, ls.stats.ff.resizes, ls.stats.ff.memory,
+ ls.stats.ff.total_read_bytes, ls.stats.ff.total_read_bytes / (ls.stats.ff.reads ? ls.stats.ff.reads : 1), ls.stats.ff.max_read_size,
+ ls.stats.ff.max_source_bytes, ls.stats.ff.max_lines, ls.stats.ff.max_words);
+
+ fprintf(stderr, "\n");
+ fprintf(stderr, "MNL(without namespaces) [ requests: %zu ]\n",
+ ls.stats.mnl_sends);
+ }
+
return 0;
}
diff --git a/src/collectors/plugins.d/ndsudo.c b/src/collectors/utils/ndsudo.c
index d2cf4fae1..e37110bbb 100644
--- a/src/collectors/plugins.d/ndsudo.c
+++ b/src/collectors/utils/ndsudo.c
@@ -14,6 +14,42 @@ struct command {
const char *search[MAX_SEARCH];
} allowed_commands[] = {
{
+ .name = "chronyc-serverstats",
+ .params = "serverstats",
+ .search =
+ {
+ [0] = "chronyc",
+ [1] = NULL,
+ },
+ },
+ {
+ .name = "varnishadm-backend-list",
+ .params = "backend.list",
+ .search =
+ {
+ [0] = "varnishadm",
+ [1] = NULL,
+ },
+ },
+ {
+ .name = "varnishstat-stats",
+ .params = "-1 -t off -n {{instanceName}}",
+ .search =
+ {
+ [0] = "varnishstat",
+ [1] = NULL,
+ },
+ },
+ {
+ .name = "smbstatus-profile",
+ .params = "-P",
+ .search =
+ {
+ [0] = "smbstatus",
+ [1] = NULL,
+ },
+ },
+ {
.name = "exim-bpc",
.params = "-bpc",
.search =
diff --git a/src/collectors/windows-events.plugin/README.md b/src/collectors/windows-events.plugin/README.md
new file mode 100644
index 000000000..ecaa4349a
--- /dev/null
+++ b/src/collectors/windows-events.plugin/README.md
@@ -0,0 +1,289 @@
+# Windows Events plugin
+
+[KEY FEATURES](#key-features) | [EVENTS SOURCES](#events-sources) | [EVENT FIELDS](#event-fields) |
+[PLAY MODE](#play-mode) | [FULL TEXT SEARCH](#full-text-search) | [PERFORMANCE](#query-performance) |
+[CONFIGURATION](#configuration-and-maintenance) | [FAQ](#faq)
+
+The Windows Events plugin by Netdata makes viewing, exploring and analyzing Windows Events simple and
+efficient.
+
+![image](https://github.com/user-attachments/assets/71a1ab1d-5b7b-477e-a4e6-a30275a5710b)
+
+## Key features
+
+- Supports **Windows Event Logs (WEL)**.
+- Supports **Event Tracing for Windows (ETW)** and **TraceLogging (TL)**, when events are routed to Event Log.
+- Allows filtering on all System Events fields.
+- Allows **full text search** (`grep`) on all System and User fields.
+- Provides a **histogram** for log entries over time, with a breakdown per field-value, for any System Event field and any
+  time-frame.
+- Supports coloring log entries based on severity.
+- In PLAY mode it "tails" all the Events, showing new log entries immediately after they are received.
+
+### Prerequisites
+
+`windows-events.plugin` is a Netdata Function Plugin.
+
+To protect your privacy, as with all Netdata Functions, a free Netdata Cloud user account is required to access it.
+For more information check [this discussion](https://github.com/netdata/netdata/discussions/16136).
+
+## Events Sources
+
+The plugin automatically detects all the available channels and offers a list of "Event Channels".
+
+By default, it aggregates events from all event channels, providing a unified view of all events.
+
+> To improve query performance, we recommend selecting the relevant event channels before doing more
+> analysis on the events.
+
+In the list of event channels, several shortcuts are added, aggregating events according to various attributes:
+
+- `All`, aggregates events from all available channels. This provides a holistic view of all events in the system.
+- `All-Admin`, `All-Operational`, `All-Analytic` and `All-Debug` aggregate events from channels marked `Admin`, `Operational`, `Analytic` and `Debug`, respectively.
+- `All-Windows`, aggregates events from `Application`, `Security`, `System` and `Setup`.
+- `All-Enabled` and `All-Disabled` aggregate events from channels depending on whether they are enabled or disabled.
+- `All-Forwarded` aggregates events from channels owned by `Microsoft-Windows-EventCollector`.
+- `All-Classic` aggregates events from channels using the Classic Event Log API.
+- `All-Of-X`, where `X` is a provider name, is offered for all providers having more than one channel.
+- `All-In-X`, where `X` is `Backup-Mode`, `Overwrite-Mode`, `StopWhenFull-Mode` or `RetainAndBackup-Mode`, aggregates events based on the channel retention policy.
+
+Channels that are configured but are not queryable, and channels that do not have any events in them, are automatically excluded from the channels list.
+
+## Event Fields
+
+Windows Events are structured with both system-defined fields and user-defined fields.
+The Windows Events plugin primarily works with the system-defined fields, which are consistently available
+across all event types.
+
+### System-defined fields
+
+The system-defined fields are:
+
+1. **EventRecordID**
+ A unique, sequential identifier for the event within the channel. This ID increases as new events are logged.
+
+2. **Version**
+ The version of the event, indicating possible structural changes or updates to the event definition.
+
+ Netdata adds this field automatically when it is not zero.
+
+3. **Level**
+ The severity or importance of the event. Levels can include:
+ - 0: LogAlways (reserved)
+ - 1: Critical
+ - 2: Error
+ - 3: Warning
+ - 4: Informational
+ - 5: Verbose
+
+ Additionally, applications may define their own levels.
+
+   Netdata provides 2 fields: `Level` and `LevelID`, for its text and numeric representations.
+
+4. **Opcode**
+ The action or state within a provider when the event was logged.
+
+   Netdata provides 2 fields: `Opcode` and `OpcodeID`, for its text and numeric representations.
+
+5. **EventID**
+ This identifies the event template, linking it to a specific message or event type. Event IDs are provider-specific.
+
+6. **Task**
+ Defines a higher-level categorization or logical grouping for the event, often related to a specific function within the application or provider.
+
+   Netdata provides 2 fields: `Task` and `TaskID`, for its text and numeric representations.
+
+7. **Qualifiers**
+ Provides additional detail for interpreting the event and is often specific to the event source.
+
+ Netdata adds this field automatically when it is not zero.
+
+8. **ProcessID**
+ The ID of the process that generated the event, useful for pinpointing the source of the event within the system.
+
+9. **ThreadID**
+ The ID of the thread within the process that generated the event, which helps in more detailed debugging scenarios.
+
+10. **Keywords**
+ A categorization field that can be used for event filtering. Keywords are bit flags that represent categories or purposes of the event, providing additional context.
+
+    Netdata provides 2 fields: `Keywords` and `keywordsID`, for its text and numeric representations.
+
+11. **Provider**
+ The unique identifier (GUID) of the event provider. This is essential for knowing which application or system component generated the event.
+
+    Netdata provides 2 fields: `Provider` and `ProviderGUID`, for the provider's name and GUID.
+
+12. **ActivityID**
+ A GUID that correlates events generated as part of the same operation or transaction, helping to track activities across different components or stages.
+
+ Netdata adds this field automatically when it is not zero.
+
+13. **RelatedActivityID**
+ A GUID that links related operations or transactions, allowing for tracing complex workflows where one event triggers or relates to another.
+
+ Netdata adds this field automatically when it is not zero.
+
+14. **Timestamp**
+ The timestamp when the event was created. This provides precise timing information about when the event occurred.
+
+15. **User**
+ The system user who logged this event.
+
+ Netdata provides 3 fields: `UserAccount`, `UserDomain` and `UserSID`.
+
+### User-defined fields
+
+Each event log entry can include up to 100 user-defined fields (per event-id).
+
+Unfortunately, accessing these fields is significantly slower, to the point that it is not practical
+when there are more than a few thousand log entries to explore. So, Netdata loads them lazily.
+
+This prevents Netdata from offering filtering on user-defined fields, although Netdata does support
+full-text search on user-defined field values.
+
+### Event fields as columns in the table
+
+The system fields mentioned above are offered as columns in the UI. Use the gear button above the table to
+select visible columns.
+
+### Event fields as filters
+
+The plugin presents the system fields as filters for the query, with counters for each of the possible values
+for the field. This list can be used to quickly check which fields and values are available for the entire
+time-frame of the query, across multiple providers and channels.
+
+### Event fields as histogram sources
+
+The histogram can be based on any of the system fields that are available as filters. This allows you to
+visualize the distribution of events over time based on different criteria such as Level, Provider, or EventID.
+
+## PLAY mode
+
+The PLAY mode in this plugin allows real-time monitoring of new events as they are added to the Windows Event
+Log. This feature works by continuously querying for new events and updating the display.
+
+## Full-text search
+
+The plugin supports searching for text within all system and user fields of the events. This means that while
+user-defined fields are not directly filterable, they are searchable through the full-text search feature.
+
+Keep in mind that query performance is slower while doing full-text search, mainly because the plugin
+needs to ask the system to provide all the user-field values.
+
+## Query performance
+
+The plugin is optimized to work efficiently with Event Logs. It uses several layers of caching and
+similar techniques to offload as much work as possible from the system, offering quick responses even when
+hundreds of thousands of events are within the visible timeframe.
+
+To achieve this level of efficiency, the plugin:
+
+- pre-loads ETW providers' manifests for resolving numeric Levels, Opcodes, Tasks and Keywords to text.
+- caches number-to-text maps for Levels, Opcodes, Tasks and Keywords, per provider, for WEL providers.
+- caches user SID to account and domain maps.
+- lazy loads the "expensive" event Message and XML, so that the system is queried only for the visible events.
+
+For Full Text Search:
+
+- requests only the Message and the values of the user-fields from the system, avoiding the "expensive" XML call (which is still lazy-loaded).
+
+The result is a system that is highly efficient for working with moderate volumes (hundreds of thousands) of events.
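+
+As a simplified illustration of the per-provider number-to-text caching idea described above (a minimal sketch,
+not the plugin's actual implementation; the names and types below are made up for this example), a cache entry is
+keyed by the provider and the numeric value, so repeated lookups avoid querying the provider metadata again:
+
+```c
+// Minimal sketch of a (provider, value) -> text cache. Illustrative only.
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#define CACHE_SLOTS 1024
+
+typedef struct {
+    char provider[64];   // provider name (the plugin keys on the provider GUID)
+    uint64_t value;      // numeric Level/Opcode/Task/Keyword value
+    char name[64];       // resolved human-readable text
+    int used;
+} cache_entry;
+
+static cache_entry cache[CACHE_SLOTS];
+
+static size_t slot_of(const char *provider, uint64_t value) {
+    size_t h = (size_t)(value * 1099511628211ULL);          // trivial hash, for the example
+    for (const char *p = provider; *p; p++)
+        h = (h ^ (size_t)(unsigned char)*p) * 16777619u;
+    return h % CACHE_SLOTS;
+}
+
+const char *cache_lookup(const char *provider, uint64_t value) {
+    cache_entry *e = &cache[slot_of(provider, value)];
+    if (e->used && e->value == value && strcmp(e->provider, provider) == 0)
+        return e->name;   // hit: no need to query the provider metadata again
+    return NULL;          // miss: resolve via the Windows APIs, then cache_store()
+}
+
+void cache_store(const char *provider, uint64_t value, const char *name) {
+    cache_entry *e = &cache[slot_of(provider, value)];
+    snprintf(e->provider, sizeof(e->provider), "%s", provider);
+    snprintf(e->name, sizeof(e->name), "%s", name);
+    e->value = value;
+    e->used = 1;
+}
+```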
+
+## Configuration and maintenance
+
+This Netdata plugin does not require any specific configuration. It automatically detects available event logs
+on the system.
+
+## FAQ
+
+### Can I use this plugin on event centralization servers?
+
+Yes. You can centralize your Windows Events using Windows Event Forwarding (WEF) or other event collection
+mechanisms, and then install Netdata on that central server to explore the events of all your
+infrastructure.
+
+This plugin will automatically provide multi-node views of your events and also give you the ability to
+combine the events of multiple servers, as you see fit.
+
+### Can I use this plugin from a parent Netdata?
+
+Yes. When your nodes are connected to a Netdata parent, all their functions are available via the parent's UI.
+So, from the parent UI, you can access the functions of all your nodes.
+
+Keep in mind that, to protect your privacy, a free Netdata Cloud account is required to access Netdata
+Functions.
+
+### Is any of my data exposed to Netdata Cloud from this plugin?
+
+No. When you access the agent directly, none of your data passes through Netdata Cloud. You need a free Netdata
+Cloud account only to verify your identity and enable the use of Netdata Functions. Once this is done, all the
+data flows directly from your Netdata agent to your web browser.
+
+When you access Netdata via https://app.netdata.cloud, your data travels via Netdata Cloud, but it is not stored
+in Netdata Cloud. This is done to allow you to access your Netdata agents from anywhere. All communication from/to
+Netdata Cloud is encrypted.
+
+### What are the different types of event logs supported by this plugin?
+
+The plugin supports all the kinds of event logs currently supported by the Windows Event Viewer:
+
+- Windows Event Logs (WEL): The traditional event logging system in Windows.
+- Event Tracing for Windows (ETW): A more detailed and efficient event tracing system.
+- TraceLogging (TL): An extension of ETW that simplifies the process of adding events to your code.
+
+The plugin can access all of these when they are routed to the Windows Event Log.
+
+### How does this plugin handle user-defined fields in Windows Events?
+
+User-defined fields are not directly exposed as table columns or filters in the plugin interface. However,
+they are included in the XML representation of each event, which can be viewed in the info sidebar when
+clicking on an event entry. Additionally, the full-text search feature does search through these
+user-defined fields, allowing you to find specific information even if it's not in the main system fields.
+
+### Can I use this plugin to monitor real-time events?
+
+Yes, the plugin supports a PLAY mode that allows you to monitor events in real-time. When activated, it
+continuously updates to show new events as they are logged, similar to the "tail" functionality in
+Unix-like systems.
+
+### How does the plugin handle large volumes of events?
+
+The plugin is designed to handle moderate volumes of events (hundreds of thousands) efficiently.
+
+It is on our roadmap to port the `systemd-journal` sampling techniques to it, so that it can work with very large
+datasets and provide quick responses while still giving accurate representations of the data. Until then, for
+the best performance, we recommend querying smaller time frames or using more specific filters when dealing
+with extremely large event volumes.
+
+### Can I use this plugin to analyze events from multiple servers?
+
+Yes, if you have set up Windows Event Forwarding (WEF) or another method of centralizing your Windows Events,
+you can use this plugin on the central server to analyze events from multiple sources. The plugin will
+automatically detect the available event sources.
+
+### How does the histogram feature work in this plugin?
+
+The histogram feature provides a visual representation of event frequency over time. You can base the
+histogram on any of the system fields available as filters (such as Level, Provider, or EventID). This
+allows you to quickly identify patterns or anomalies in your event logs.
+
+### Is it possible to export or share the results from this plugin?
+
+While the plugin doesn't have a direct export feature, you can use browser-based methods to save or share
+the results. This could include taking screenshots, using browser print/save as PDF functionality, or
+copying data from the table view. For more advanced data export needs, you might need to use the Windows
+Event Log API directly or other Windows administrative tools.
+
+### How often does the plugin update its data?
+
+The plugin updates its data in real-time when in PLAY mode. In normal mode, it refreshes data based on the
+query you've submitted. The plugin is designed to provide the most up-to-date information available in the
+Windows Event Logs at the time of the query.
+
+## TODO
+
+1. Support Sampling, so that the plugin can respond faster even on very busy systems (millions of events visible).
+2. Support exploring events from live Tracing sessions.
+3. Support exploring events in saved Event Trace Log files (`.etl` files).
+4. Support exploring events in saved Event Logs files (`.evtx` files).
diff --git a/src/collectors/windows-events.plugin/windows-events-fields-cache.c b/src/collectors/windows-events.plugin/windows-events-fields-cache.c
new file mode 100644
index 000000000..4b4b72fa4
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-fields-cache.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows-events-fields-cache.h"
+
+typedef struct field_key {
+ uint64_t value;
+ ND_UUID provider;
+} WEVT_FIELD_KEY;
+
+typedef struct field_value {
+ WEVT_FIELD_KEY key;
+ uint32_t name_size;
+ char name[];
+} WEVT_FIELD_VALUE;
+
+#define SIMPLE_HASHTABLE_NAME _FIELDS_CACHE
+#define SIMPLE_HASHTABLE_VALUE_TYPE WEVT_FIELD_VALUE
+#define SIMPLE_HASHTABLE_KEY_TYPE WEVT_FIELD_KEY
+#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION field_cache_value_to_key
+#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION field_cache_cache_compar
+#define SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION 1
+#include "libnetdata/simple_hashtable/simple_hashtable.h"
+
+static inline WEVT_FIELD_KEY *field_cache_value_to_key(WEVT_FIELD_VALUE *p) {
+ return &p->key;
+}
+
+static inline bool field_cache_cache_compar(WEVT_FIELD_KEY *a, WEVT_FIELD_KEY *b) {
+ return memcmp(a, b, sizeof(WEVT_FIELD_KEY)) == 0;
+}
+
+struct ht {
+ SPINLOCK spinlock;
+ size_t allocations;
+ size_t bytes;
+ struct simple_hashtable_FIELDS_CACHE ht;
+};
+
+static struct {
+ bool initialized;
+ struct ht ht[WEVT_FIELD_TYPE_MAX];
+} fdc = {
+ .initialized = false,
+};
+
+void field_cache_init(void) {
+ for(size_t type = 0; type < WEVT_FIELD_TYPE_MAX ; type++) {
+ spinlock_init(&fdc.ht[type].spinlock);
+ simple_hashtable_init_FIELDS_CACHE(&fdc.ht[type].ht, 10000);
+ }
+}
+
+static inline bool should_zero_provider(WEVT_FIELD_TYPE type, uint64_t value) {
+ switch(type) {
+ case WEVT_FIELD_TYPE_LEVEL:
+ return !is_valid_provider_level(value, true);
+
+ case WEVT_FIELD_TYPE_KEYWORD:
+ return !is_valid_provider_keyword(value, true);
+
+ case WEVT_FIELD_TYPE_OPCODE:
+ return !is_valid_provider_opcode(value, true);
+
+ case WEVT_FIELD_TYPE_TASK:
+ return !is_valid_provider_task(value, true);
+
+ default:
+ return false;
+ }
+}
+
+bool field_cache_get(WEVT_FIELD_TYPE type, const ND_UUID *uuid, uint64_t value, TXT_UTF8 *dst) {
+ fatal_assert(type < WEVT_FIELD_TYPE_MAX);
+
+ struct ht *ht = &fdc.ht[type];
+
+ WEVT_FIELD_KEY t = {
+ .value = value,
+ .provider = should_zero_provider(type, value) ? UUID_ZERO : *uuid,
+ };
+ XXH64_hash_t hash = XXH3_64bits(&t, sizeof(t));
+
+ spinlock_lock(&ht->spinlock);
+ SIMPLE_HASHTABLE_SLOT_FIELDS_CACHE *slot = simple_hashtable_get_slot_FIELDS_CACHE(&ht->ht, hash, &t, true);
+ WEVT_FIELD_VALUE *v = SIMPLE_HASHTABLE_SLOT_DATA(slot);
+ spinlock_unlock(&ht->spinlock);
+
+ if(v) {
+ txt_utf8_resize(dst, v->name_size, false);
+ memcpy(dst->data, v->name, v->name_size);
+ dst->used = v->name_size;
+ dst->src = TXT_SOURCE_FIELD_CACHE;
+ return true;
+ }
+
+ return false;
+}
+
+static WEVT_FIELD_VALUE *wevt_create_cache_entry(WEVT_FIELD_KEY *t, TXT_UTF8 *name, size_t *bytes) {
+ *bytes = sizeof(WEVT_FIELD_VALUE) + name->used;
+ WEVT_FIELD_VALUE *v = callocz(1, *bytes);
+ v->key = *t;
+ memcpy(v->name, name->data, name->used);
+ v->name_size = name->used;
+ return v;
+}
+
+//static bool is_numeric(const char *s) {
+// while(*s) {
+// if(!isdigit((uint8_t)*s++))
+// return false;
+// }
+//
+// return true;
+//}
+
+void field_cache_set(WEVT_FIELD_TYPE type, const ND_UUID *uuid, uint64_t value, TXT_UTF8 *name) {
+ fatal_assert(type < WEVT_FIELD_TYPE_MAX);
+
+ struct ht *ht = &fdc.ht[type];
+
+ WEVT_FIELD_KEY t = {
+ .value = value,
+ .provider = should_zero_provider(type, value) ? UUID_ZERO : *uuid,
+ };
+ XXH64_hash_t hash = XXH3_64bits(&t, sizeof(t));
+
+ spinlock_lock(&ht->spinlock);
+ SIMPLE_HASHTABLE_SLOT_FIELDS_CACHE *slot = simple_hashtable_get_slot_FIELDS_CACHE(&ht->ht, hash, &t, true);
+ WEVT_FIELD_VALUE *v = SIMPLE_HASHTABLE_SLOT_DATA(slot);
+ if(!v) {
+ size_t bytes;
+ v = wevt_create_cache_entry(&t, name, &bytes);
+ simple_hashtable_set_slot_FIELDS_CACHE(&ht->ht, slot, hash, v);
+
+ ht->allocations++;
+ ht->bytes += bytes;
+ }
+// else {
+// if((v->name_size == 1 && name->used > 0) || is_numeric(v->name)) {
+// size_t bytes;
+// WEVT_FIELD_VALUE *nv = wevt_create_cache_entry(&t, name, &bytes);
+// simple_hashtable_set_slot_FIELDS_CACHE(&ht->ht, slot, hash, nv);
+// ht->bytes += name->used;
+// ht->bytes -= v->name_size;
+// freez(v);
+// }
+// else if(name->used > 2 && !is_numeric(name->data) && (v->name_size != name->used || strcasecmp(v->name, name->data) != 0)) {
+// const char *a = v->name;
+// const char *b = name->data;
+// int x = 0;
+// x++;
+// }
+// }
+
+ spinlock_unlock(&ht->spinlock);
+}
+
diff --git a/src/collectors/windows-events.plugin/windows-events-fields-cache.h b/src/collectors/windows-events.plugin/windows-events-fields-cache.h
new file mode 100644
index 000000000..a76170d68
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-fields-cache.h
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WINDOWS_EVENTS_FIELDS_CACHE_H
+#define NETDATA_WINDOWS_EVENTS_FIELDS_CACHE_H
+
+#include "windows-events.h"
+
+typedef enum __attribute__((packed)) {
+ WEVT_FIELD_TYPE_LEVEL = 0,
+ WEVT_FIELD_TYPE_OPCODE,
+ WEVT_FIELD_TYPE_KEYWORD,
+ WEVT_FIELD_TYPE_TASK,
+
+ // terminator
+ WEVT_FIELD_TYPE_MAX,
+} WEVT_FIELD_TYPE;
+
+void field_cache_init(void);
+bool field_cache_get(WEVT_FIELD_TYPE type, const ND_UUID *uuid, uint64_t value, TXT_UTF8 *dst);
+void field_cache_set(WEVT_FIELD_TYPE type, const ND_UUID *uuid, uint64_t value, TXT_UTF8 *name);
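+
+// Typical usage (an illustrative sketch; not copied from the plugin's callers):
+//
+//    field_cache_init();                                  // once, at startup
+//
+//    TXT_UTF8 txt = { 0 };
+//    if(!field_cache_get(WEVT_FIELD_TYPE_LEVEL, &provider_uuid, level_value, &txt)) {
+//        // cache miss: resolve the value via the provider metadata,
+//        // fill `txt`, and remember it for subsequent lookups
+//        field_cache_set(WEVT_FIELD_TYPE_LEVEL, &provider_uuid, level_value, &txt);
+//    }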
+
+#endif //NETDATA_WINDOWS_EVENTS_FIELDS_CACHE_H
diff --git a/src/collectors/windows-events.plugin/windows-events-providers.c b/src/collectors/windows-events.plugin/windows-events-providers.c
new file mode 100644
index 000000000..d4c4d35ea
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-providers.c
@@ -0,0 +1,678 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows-events-providers.h"
+
+#define MAX_OPEN_HANDLES_PER_PROVIDER 5
+
+struct provider;
+
+// typedef as PROVIDER_META_HANDLE in include file
+struct provider_meta_handle {
+ pid_t owner; // the owner of the handle, or zero
+ uint32_t locks; // the number of locks the owner has on this handle
+ EVT_HANDLE hMetadata; // the handle
+ struct provider *provider; // a pointer back to the provider
+
+ usec_t created_monotonic_ut; // the monotonic timestamp this handle was created
+
+ // double linked list
+ PROVIDER_META_HANDLE *prev;
+ PROVIDER_META_HANDLE *next;
+};
+
+struct provider_data {
+ uint64_t value; // the mask of the keyword
+ XXH64_hash_t hash; // the hash of the name
+ uint32_t len; // the length of the name
+ char *name; // the name of the keyword in UTF-8
+};
+
+struct provider_list {
+ uint64_t min, max, mask;
+ bool exceeds_data_type; // true when the manifest values exceed the capacity of the EvtXXX() API
+ uint32_t total; // the number of entries in the array
+ struct provider_data *array; // the array of entries, sorted (for binary search)
+};
+
+typedef struct provider_key {
+ ND_UUID uuid; // the Provider GUID
+ DWORD len; // the length of the Provider Name
+ const wchar_t *wname; // the Provider wide-string Name (UTF-16)
+} PROVIDER_KEY;
+
+typedef struct provider {
+ PROVIDER_KEY key;
+ const char *name; // the Provider Name (UTF-8)
+ uint32_t total_handles; // the number of handles allocated
+ uint32_t available_handles; // the number of available handles
+ uint32_t deleted_handles; // the number of deleted handles
+ PROVIDER_META_HANDLE *handles; // a double linked list of all the handles
+
+ WEVT_PROVIDER_PLATFORM platform;
+
+ struct provider_list keyword;
+ struct provider_list tasks;
+ struct provider_list opcodes;
+ struct provider_list levels;
+} PROVIDER;
+
+// A hashtable implementation for Providers
+// using the Provider GUID as key and PROVIDER as value
+#define SIMPLE_HASHTABLE_NAME _PROVIDER
+#define SIMPLE_HASHTABLE_VALUE_TYPE PROVIDER
+#define SIMPLE_HASHTABLE_KEY_TYPE PROVIDER_KEY
+#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION provider_value_to_key
+#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION provider_cache_compar
+#define SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION 1
+#include "libnetdata/simple_hashtable/simple_hashtable.h"
+
+static struct {
+ SPINLOCK spinlock;
+ uint32_t total_providers;
+ uint32_t total_handles;
+ uint32_t deleted_handles;
+ struct simple_hashtable_PROVIDER hashtable;
+ ARAL *aral_providers;
+ ARAL *aral_handles;
+} pbc = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+};
+
+static void provider_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, WEVT_VARIANT *property,
+ TXT_UTF16 *dst, struct provider_list *l, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id);
+
+const char *provider_get_name(PROVIDER_META_HANDLE *p) {
+ return (p && p->provider && p->provider->name) ? p->provider->name : "__UNKNOWN PROVIDER__";
+}
+
+ND_UUID provider_get_uuid(PROVIDER_META_HANDLE *p) {
+ return (p && p->provider) ? p->provider->key.uuid : UUID_ZERO;
+}
+
+static inline PROVIDER_KEY *provider_value_to_key(PROVIDER *p) {
+ return &p->key;
+}
+
+static inline bool provider_cache_compar(PROVIDER_KEY *a, PROVIDER_KEY *b) {
+ return a->len == b->len && UUIDeq(a->uuid, b->uuid) && memcmp(a->wname, b->wname, a->len) == 0;
+}
+
+void provider_cache_init(void) {
+ simple_hashtable_init_PROVIDER(&pbc.hashtable, 100000);
+ pbc.aral_providers = aral_create("wevt_providers", sizeof(PROVIDER), 0, 4096, NULL, NULL, NULL, false, true);
+ pbc.aral_handles = aral_create("wevt_handles", sizeof(PROVIDER_META_HANDLE), 0, 4096, NULL, NULL, NULL, false, true);
+}
+
+static bool provider_property_get(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) {
+ DWORD bufferUsed = 0;
+
+ if(!EvtGetPublisherMetadataProperty(h->hMetadata, property_id, 0, 0, NULL, &bufferUsed)) {
+ DWORD status = GetLastError();
+ if (status != ERROR_INSUFFICIENT_BUFFER) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetPublisherMetadataProperty() failed");
+ goto cleanup;
+ }
+ }
+
+ wevt_variant_resize(content, bufferUsed);
+ if (!EvtGetPublisherMetadataProperty(h->hMetadata, property_id, 0, content->size, content->data, &bufferUsed)) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetPublisherMetadataProperty() failed after resize");
+ goto cleanup;
+ }
+
+ return true;
+
+cleanup:
+ return false;
+}
+
+static bool provider_string_property_exists(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) {
+ if(!provider_property_get(h, content, property_id))
+ return false;
+
+ if(content->data->Type != EvtVarTypeString)
+ return false;
+
+ if(!content->data->StringVal[0])
+ return false;
+
+ return true;
+}
+
+static void provider_detect_platform(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content) {
+ if(UUIDiszero(h->provider->key.uuid))
+ h->provider->platform = WEVT_PLATFORM_WEL;
+ else if(h->hMetadata) {
+ if (provider_string_property_exists(h, content, EvtPublisherMetadataMessageFilePath) ||
+ provider_string_property_exists(h, content, EvtPublisherMetadataResourceFilePath) ||
+ provider_string_property_exists(h, content, EvtPublisherMetadataParameterFilePath))
+ h->provider->platform = WEVT_PLATFORM_ETW;
+ else
+            // the provider metadata can be opened, but it has no resource files (message, resource, parameter)
+ h->provider->platform = WEVT_PLATFORM_TL;
+ }
+ else h->provider->platform = WEVT_PLATFORM_ETW;
+}
+
+WEVT_PROVIDER_PLATFORM provider_get_platform(PROVIDER_META_HANDLE *p) {
+ return p->provider->platform;
+}
+
+PROVIDER_META_HANDLE *provider_get(ND_UUID uuid, LPCWSTR providerName) {
+ if(!providerName || !providerName[0])
+ return NULL;
+
+ PROVIDER_KEY key = {
+ .uuid = uuid,
+ .len = wcslen(providerName),
+ .wname = providerName,
+ };
+ XXH64_hash_t hash = XXH3_64bits(providerName, wcslen(key.wname) * sizeof(*key.wname));
+
+ spinlock_lock(&pbc.spinlock);
+
+ SIMPLE_HASHTABLE_SLOT_PROVIDER *slot =
+ simple_hashtable_get_slot_PROVIDER(&pbc.hashtable, hash, &key, true);
+
+ bool load_it = false;
+ PROVIDER *p = SIMPLE_HASHTABLE_SLOT_DATA(slot);
+ if(!p) {
+ p = aral_callocz(pbc.aral_providers);
+ p->key.uuid = key.uuid;
+ p->key.len = key.len;
+ p->key.wname = wcsdup(key.wname);
+ p->name = strdupz(provider2utf8(key.wname));
+ simple_hashtable_set_slot_PROVIDER(&pbc.hashtable, slot, hash, p);
+ load_it = true;
+ pbc.total_providers++;
+ }
+
+ pid_t me = gettid_cached();
+ PROVIDER_META_HANDLE *h;
+ for(h = p->handles; h ;h = h->next) {
+ // find the first that is mine,
+ // or the first not owned by anyone
+ if(!h->owner || h->owner == me)
+ break;
+ }
+
+ if(!h) {
+ h = aral_callocz(pbc.aral_handles);
+ h->provider = p;
+ h->created_monotonic_ut = now_monotonic_usec();
+ h->hMetadata = EvtOpenPublisherMetadata(
+ NULL, // Local machine
+ providerName, // Provider name
+ NULL, // Log file path (NULL for default)
+ 0, // Locale (0 for default locale)
+ 0 // Flags
+ );
+
+ // we put it at the beginning of the list
+ // to find it first if the same owner needs more locks on it
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(p->handles, h, prev, next);
+ pbc.total_handles++;
+ p->total_handles++;
+ p->available_handles++;
+ }
+
+ if(!h->owner) {
+ fatal_assert(p->available_handles > 0);
+ p->available_handles--;
+ h->owner = me;
+ }
+
+ h->locks++;
+
+ if(load_it) {
+ WEVT_VARIANT content = { 0 };
+ WEVT_VARIANT property = { 0 };
+ TXT_UTF16 unicode = { 0 };
+
+ provider_detect_platform(h, &content);
+ provider_load_list(h, &content, &property, &unicode, &p->keyword, EvtPublisherMetadataKeywords);
+ provider_load_list(h, &content, &property, &unicode, &p->levels, EvtPublisherMetadataLevels);
+ provider_load_list(h, &content, &property, &unicode, &p->opcodes, EvtPublisherMetadataOpcodes);
+ provider_load_list(h, &content, &property, &unicode, &p->tasks, EvtPublisherMetadataTasks);
+
+ txt_utf16_cleanup(&unicode);
+ wevt_variant_cleanup(&content);
+ wevt_variant_cleanup(&property);
+ }
+
+ spinlock_unlock(&pbc.spinlock);
+
+ return h;
+}
+
+EVT_HANDLE provider_handle(PROVIDER_META_HANDLE *h) {
+ return h ? h->hMetadata : NULL;
+}
+
+PROVIDER_META_HANDLE *provider_dup(PROVIDER_META_HANDLE *h) {
+ if(h) h->locks++;
+ return h;
+}
+
+static void provider_meta_handle_delete(PROVIDER_META_HANDLE *h) {
+ PROVIDER *p = h->provider;
+
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(p->handles, h, prev, next);
+
+ if(h->hMetadata)
+ EvtClose(h->hMetadata);
+
+ aral_freez(pbc.aral_handles, h);
+
+ fatal_assert(pbc.total_handles && p->total_handles && p->available_handles);
+
+ pbc.total_handles--;
+ p->total_handles--;
+
+ pbc.deleted_handles++;
+ p->deleted_handles++;
+
+ p->available_handles--;
+}
+
+void providers_release_unused_handles(void) {
+ usec_t now_ut = now_monotonic_usec();
+
+ spinlock_lock(&pbc.spinlock);
+ for(size_t i = 0; i < pbc.hashtable.size ; i++) {
+ SIMPLE_HASHTABLE_SLOT_PROVIDER *slot = &pbc.hashtable.hashtable[i];
+ PROVIDER *p = SIMPLE_HASHTABLE_SLOT_DATA(slot);
+ if(!p) continue;
+
+ PROVIDER_META_HANDLE *h = p->handles;
+ while(h) {
+ PROVIDER_META_HANDLE *next = h->next;
+
+ if(!h->locks && (now_ut - h->created_monotonic_ut) >= WINDOWS_EVENTS_RELEASE_IDLE_PROVIDER_HANDLES_TIME_UT)
+ provider_meta_handle_delete(h);
+
+ h = next;
+ }
+ }
+ spinlock_unlock(&pbc.spinlock);
+}
+
+void provider_release(PROVIDER_META_HANDLE *h) {
+ if(!h) return;
+ pid_t me = gettid_cached();
+ fatal_assert(h->owner == me);
+ fatal_assert(h->locks > 0);
+ if(--h->locks == 0) {
+ PROVIDER *p = h->provider;
+
+ spinlock_lock(&pbc.spinlock);
+ h->owner = 0;
+
+ if(++p->available_handles > MAX_OPEN_HANDLES_PER_PROVIDER) {
+ // there are too many idle handles on this provider
+ provider_meta_handle_delete(h);
+ }
+ else if(h->next) {
+ // it is not the last, put it at the end
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(p->handles, h, prev, next);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(p->handles, h, prev, next);
+ }
+
+ spinlock_unlock(&pbc.spinlock);
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// load provider lists
+
+static bool wevt_get_property_from_array(WEVT_VARIANT *property, EVT_HANDLE handle, DWORD dwIndex, EVT_PUBLISHER_METADATA_PROPERTY_ID PropertyId) {
+ DWORD used = 0;
+
+ if (!EvtGetObjectArrayProperty(handle, PropertyId, dwIndex, 0, property->size, property->data, &used)) {
+ DWORD status = GetLastError();
+ if (status != ERROR_INSUFFICIENT_BUFFER) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetObjectArrayProperty() failed");
+ return false;
+ }
+
+ wevt_variant_resize(property, used);
+ if (!EvtGetObjectArrayProperty(handle, PropertyId, dwIndex, 0, property->size, property->data, &used)) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetObjectArrayProperty() failed");
+ return false;
+ }
+ }
+
+ property->used = used;
+ return true;
+}
+
+// Comparison function for ascending order (for Levels, Opcodes, Tasks)
+static int compare_ascending(const void *a, const void *b) {
+ struct provider_data *d1 = (struct provider_data *)a;
+ struct provider_data *d2 = (struct provider_data *)b;
+
+ if (d1->value < d2->value) return -1;
+ if (d1->value > d2->value) return 1;
+ return 0;
+}
+
+//// Comparison function for descending order (for Keywords)
+//static int compare_descending(const void *a, const void *b) {
+// struct provider_data *d1 = (struct provider_data *)a;
+// struct provider_data *d2 = (struct provider_data *)b;
+//
+// if (d1->value > d2->value) return -1;
+// if (d1->value < d2->value) return 1;
+// return 0;
+//}
+
+static void provider_load_list(PROVIDER_META_HANDLE *h, WEVT_VARIANT *content, WEVT_VARIANT *property,
+ TXT_UTF16 *dst, struct provider_list *l, EVT_PUBLISHER_METADATA_PROPERTY_ID property_id) {
+ if(!h || !h->hMetadata) return;
+
+ EVT_PUBLISHER_METADATA_PROPERTY_ID name_id, message_id, value_id;
+ uint8_t value_bits = 32;
+ int (*compare_func)(const void *, const void *);
+ bool (*is_valid)(uint64_t, bool);
+
+ switch(property_id) {
+ case EvtPublisherMetadataLevels:
+ name_id = EvtPublisherMetadataLevelName;
+ message_id = EvtPublisherMetadataLevelMessageID;
+ value_id = EvtPublisherMetadataLevelValue;
+ value_bits = 32;
+ compare_func = compare_ascending;
+ is_valid = is_valid_provider_level;
+ break;
+
+ case EvtPublisherMetadataOpcodes:
+ name_id = EvtPublisherMetadataOpcodeName;
+ message_id = EvtPublisherMetadataOpcodeMessageID;
+ value_id = EvtPublisherMetadataOpcodeValue;
+ value_bits = 32;
+ is_valid = is_valid_provider_opcode;
+ compare_func = compare_ascending;
+ break;
+
+ case EvtPublisherMetadataTasks:
+ name_id = EvtPublisherMetadataTaskName;
+ message_id = EvtPublisherMetadataTaskMessageID;
+ value_id = EvtPublisherMetadataTaskValue;
+ value_bits = 32;
+ is_valid = is_valid_provider_task;
+ compare_func = compare_ascending;
+ break;
+
+ case EvtPublisherMetadataKeywords:
+ name_id = EvtPublisherMetadataKeywordName;
+ message_id = EvtPublisherMetadataKeywordMessageID;
+ value_id = EvtPublisherMetadataKeywordValue;
+ value_bits = 64;
+ is_valid = is_valid_provider_keyword;
+ compare_func = NULL;
+ break;
+
+ default:
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "Internal Error: Can't handle property id %u", property_id);
+ return;
+ }
+
+ EVT_HANDLE hMetadata = h->hMetadata;
+ EVT_HANDLE hArray = NULL;
+ DWORD itemCount = 0;
+
+ // Get the metadata array for the list (e.g., opcodes, tasks, or levels)
+ if(!provider_property_get(h, content, property_id))
+ goto cleanup;
+
+ // Get the number of items (e.g., levels, tasks, or opcodes)
+ hArray = content->data->EvtHandleVal;
+ if (!EvtGetObjectArraySize(hArray, &itemCount)) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetObjectArraySize() failed");
+ goto cleanup;
+ }
+
+ if (itemCount == 0) {
+ l->total = 0;
+ l->array = NULL;
+ goto cleanup;
+ }
+
+ // Allocate memory for the list items
+ l->array = callocz(itemCount, sizeof(struct provider_data));
+ l->total = itemCount;
+
+ uint64_t min = UINT64_MAX, max = 0, mask = 0;
+
+ // Iterate over the list and populate the entries
+ for (DWORD i = 0; i < itemCount; ++i) {
+ struct provider_data *d = &l->array[i];
+
+ // Get the value (e.g., opcode, task, or level)
+ if (wevt_get_property_from_array(property, hArray, i, value_id)) {
+ switch(value_bits) {
+ case 64:
+ d->value = wevt_field_get_uint64(property->data);
+ break;
+
+ case 32:
+ d->value = wevt_field_get_uint32(property->data);
+ break;
+ }
+
+ if(d->value < min)
+ min = d->value;
+
+ if(d->value > max)
+ max = d->value;
+
+ mask |= d->value;
+
+ if(!is_valid(d->value, false))
+ l->exceeds_data_type = true;
+ }
+
+ // Get the message ID
+ if (wevt_get_property_from_array(property, hArray, i, message_id)) {
+ uint32_t messageID = wevt_field_get_uint32(property->data);
+
+ if (messageID != (uint32_t)-1) {
+ if (EvtFormatMessage_utf16(dst, hMetadata, NULL, messageID, EvtFormatMessageId)) {
+ size_t len;
+ d->name = utf16_to_utf8_strdupz(dst->data, &len);
+ d->len = len;
+ }
+ }
+ }
+
+ // Get the name if the message is missing
+ if (!d->name && wevt_get_property_from_array(property, hArray, i, name_id)) {
+ fatal_assert(property->data->Type == EvtVarTypeString);
+ size_t len;
+ d->name = utf16_to_utf8_strdupz(property->data->StringVal, &len);
+ d->len = len;
+ }
+
+ // Calculate the hash for the name
+ if (d->name)
+ d->hash = XXH3_64bits(d->name, d->len);
+ }
+
+ l->min = min;
+ l->max = max;
+ l->mask = mask;
+
+ if(itemCount > 1 && compare_func != NULL) {
+        // Sort the array based on the value, ascending (keywords are left unsorted, since their compare_func is NULL)
+ qsort(l->array, itemCount, sizeof(struct provider_data), compare_func);
+ }
+
+cleanup:
+ if (hArray)
+ EvtClose(hArray);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// lookup functions
+
+// lookup bitmap metadata (returns a comma-separated list of strings)
+static bool provider_bitmap_metadata(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) {
+ if(!(value & l->mask) || !l->total || !l->array || l->exceeds_data_type)
+ return false;
+
+ // do not empty the buffer, there may be reserved keywords in it
+ // dst->used = 0;
+
+ if(dst->used)
+ dst->used--;
+
+ size_t added = 0;
+ for(size_t k = 0; value && k < l->total; k++) {
+ struct provider_data *d = &l->array[k];
+
+ if(d->value && (value & d->value) == d->value && d->name && d->len) {
+ const char *s = d->name;
+ size_t slen = d->len;
+
+ // remove the mask from the value
+ value &= ~(d->value);
+
+ txt_utf8_resize(dst, dst->used + slen + 2 + 1, true);
+
+ if(dst->used) {
+ // add a comma and a space
+ dst->data[dst->used++] = ',';
+ dst->data[dst->used++] = ' ';
+ }
+
+ memcpy(&dst->data[dst->used], s, slen);
+ dst->used += slen;
+ dst->src = TXT_SOURCE_PROVIDER;
+ added++;
+ }
+ }
+
+ if(dst->used > 1) {
+ txt_utf8_resize(dst, dst->used + 1, true);
+ dst->data[dst->used++] = 0;
+ }
+
+ fatal_assert(dst->used <= dst->size);
+ return added;
+}
+
+//// lookup a single value (returns its string)
+//static bool provider_value_metadata_linear(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) {
+// if(value < l->min || value > l->max || !l->total || !l->array || l->exceeds_data_type)
+// return false;
+//
+// dst->used = 0;
+//
+// for(size_t k = 0; k < l->total; k++) {
+// struct provider_data *d = &l->array[k];
+//
+// if(d->value == value && d->name && d->len) {
+// const char *s = d->name;
+// size_t slen = d->len;
+//
+// txt_utf8_resize(dst, slen + 1, false);
+//
+// memcpy(dst->data, s, slen);
+// dst->used = slen;
+// dst->src = TXT_SOURCE_PROVIDER;
+//
+// break;
+// }
+// }
+//
+// if(dst->used) {
+// txt_utf8_resize(dst, dst->used + 1, true);
+// dst->data[dst->used++] = 0;
+// }
+//
+// fatal_assert(dst->used <= dst->size);
+//
+// return (dst->used > 0);
+//}
+
+static bool provider_value_metadata(TXT_UTF8 *dst, struct provider_list *l, uint64_t value) {
+ if(value < l->min || value > l->max || !l->total || !l->array || l->exceeds_data_type)
+ return false;
+
+ // if(l->total < 3) return provider_value_metadata_linear(dst, l, value);
+
+ dst->used = 0;
+
+ size_t left = 0;
+ size_t right = l->total - 1;
+
+ // Binary search within bounds
+ while (left <= right) {
+ size_t mid = left + (right - left) / 2;
+ struct provider_data *d = &l->array[mid];
+
+ if (d->value == value) {
+ // Value found, now check if it has a valid name and length
+ if (d->name && d->len) {
+ const char *s = d->name;
+ size_t slen = d->len;
+
+ txt_utf8_resize(dst, slen + 1, false);
+ memcpy(dst->data, s, slen);
+ dst->used = slen;
+ dst->data[dst->used++] = 0;
+ dst->src = TXT_SOURCE_PROVIDER;
+ }
+ break;
+ }
+
+ if (d->value < value)
+ left = mid + 1;
+ else {
+ if (mid == 0) break;
+ right = mid - 1;
+ }
+ }
+
+ fatal_assert(dst->used <= dst->size);
+ return (dst->used > 0);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// public API to lookup metadata
+
+bool provider_keyword_cacheable(PROVIDER_META_HANDLE *h) {
+ return h && !h->provider->keyword.exceeds_data_type;
+}
+
+bool provider_tasks_cacheable(PROVIDER_META_HANDLE *h) {
+ return h && !h->provider->tasks.exceeds_data_type;
+}
+
+bool is_useful_provider_for_levels(PROVIDER_META_HANDLE *h) {
+ return h && !h->provider->levels.exceeds_data_type;
+}
+
+bool provider_opcodes_cacheable(PROVIDER_META_HANDLE *h) {
+ return h && !h->provider->opcodes.exceeds_data_type;
+}
+
+bool provider_get_keywords(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) {
+ if(!h) return false;
+ return provider_bitmap_metadata(dst, &h->provider->keyword, value);
+}
+
+bool provider_get_level(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) {
+ if(!h) return false;
+ return provider_value_metadata(dst, &h->provider->levels, value);
+}
+
+bool provider_get_task(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) {
+ if(!h) return false;
+ return provider_value_metadata(dst, &h->provider->tasks, value);
+}
+
+bool provider_get_opcode(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value) {
+ if(!h) return false;
+ return provider_value_metadata(dst, &h->provider->opcodes, value);
+}
diff --git a/src/collectors/windows-events.plugin/windows-events-providers.h b/src/collectors/windows-events.plugin/windows-events-providers.h
new file mode 100644
index 000000000..b6d476c5c
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-providers.h
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WINDOWS_EVENTS_PROVIDERS_H
+#define NETDATA_WINDOWS_EVENTS_PROVIDERS_H
+
+typedef enum __attribute__((packed)) {
+ WEVT_PLATFORM_UNKNOWN = 0,
+ WEVT_PLATFORM_WEL,
+ WEVT_PLATFORM_ETW,
+ WEVT_PLATFORM_TL,
+} WEVT_PROVIDER_PLATFORM;
+
+#include "windows-events.h"
+
+struct provider_meta_handle;
+typedef struct provider_meta_handle PROVIDER_META_HANDLE;
+
+PROVIDER_META_HANDLE *provider_get(ND_UUID uuid, LPCWSTR providerName);
+void provider_release(PROVIDER_META_HANDLE *h);
+EVT_HANDLE provider_handle(PROVIDER_META_HANDLE *h);
+PROVIDER_META_HANDLE *provider_dup(PROVIDER_META_HANDLE *h);
+
+void providers_release_unused_handles(void);
+
+const char *provider_get_name(PROVIDER_META_HANDLE *p);
+ND_UUID provider_get_uuid(PROVIDER_META_HANDLE *p);
+
+void provider_cache_init(void);
+
+bool provider_keyword_cacheable(PROVIDER_META_HANDLE *h);
+bool provider_tasks_cacheable(PROVIDER_META_HANDLE *h);
+bool is_useful_provider_for_levels(PROVIDER_META_HANDLE *h);
+bool provider_opcodes_cacheable(PROVIDER_META_HANDLE *h);
+
+bool provider_get_keywords(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value);
+bool provider_get_level(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value);
+bool provider_get_task(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value);
+bool provider_get_opcode(TXT_UTF8 *dst, PROVIDER_META_HANDLE *h, uint64_t value);
+WEVT_PROVIDER_PLATFORM provider_get_platform(PROVIDER_META_HANDLE *p);
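+
+// Typical usage (an illustrative sketch; not copied from the plugin's callers):
+//
+//    PROVIDER_META_HANDLE *h = provider_get(provider_uuid, providerName);  // handle locked to this thread
+//    if(h) {
+//        TXT_UTF8 level = { 0 };
+//        if(provider_get_level(&level, h, level_value))
+//            ; // level.data now holds the provider's text for this Level value
+//        provider_release(h);  // unlock; idle handles are freed later by providers_release_unused_handles()
+//    }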
+
+#endif //NETDATA_WINDOWS_EVENTS_PROVIDERS_H
diff --git a/src/collectors/windows-events.plugin/windows-events-query-builder.c b/src/collectors/windows-events.plugin/windows-events-query-builder.c
new file mode 100644
index 000000000..75c6fbdca
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-query-builder.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows-events-query-builder.h"
+
+// --------------------------------------------------------------------------------------------------------------------
+// query without XPath
+
+typedef struct static_utf8_8k {
+ char buffer[8192];
+ size_t size;
+ size_t len;
+} STATIC_BUF_8K;
+
+typedef struct static_unicode_16k {
+ wchar_t buffer[16384];
+ size_t size;
+ size_t len;
+} STATIC_UNI_16K;
+
+static bool wevt_foreach_selected_value_cb(FACETS *facets __maybe_unused, size_t id, const char *key, const char *value, void *data) {
+ STATIC_BUF_8K *b = data;
+
+ b->len += snprintfz(&b->buffer[b->len], b->size - b->len,
+ "%s%s=%s",
+ id ? " or " : "", key, value);
+
+ return b->len < b->size;
+}
+
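+// Builds a structured query of this shape (an illustrative example of the output, assuming the user
+// sliced on Level and EventID; the exact field names come from the WEVT_FIELD_* definitions):
+//
+//    Event/System[TimeCreated[@SystemTime<="2024-10-01T12:00:00.000Z"] and (Level=2 or Level=3) and (EventID=7036)]
+//
+// The TimeCreated condition is always present (<= for backward queries, >= for forward ones); the
+// parenthesized groups are appended only when slicing is enabled and values are selected for those facets.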
+wchar_t *wevt_generate_query_no_xpath(LOGS_QUERY_STATUS *lqs, BUFFER *wb) {
+ static __thread STATIC_UNI_16K q = {
+ .size = sizeof(q.buffer) / sizeof(wchar_t),
+ .len = 0,
+ };
+ static __thread STATIC_BUF_8K b = {
+ .size = sizeof(b.buffer) / sizeof(char),
+ .len = 0,
+ };
+
+ lqs_query_timeframe(lqs, ANCHOR_DELTA_UT);
+
+ usec_t seek_to = lqs->query.start_ut;
+ if(lqs->rq.direction == FACETS_ANCHOR_DIRECTION_BACKWARD)
+ // windows events queries are limited to millisecond resolution
+ // so, in order not to lose data, we have to add
+ // a millisecond when the direction is backward
+ seek_to += USEC_PER_MS;
+
+ // Convert the microseconds since Unix epoch to FILETIME (used in Windows APIs)
+ FILETIME fileTime = os_unix_epoch_ut_to_filetime(seek_to);
+
+ // Convert FILETIME to SYSTEMTIME for use in XPath
+ SYSTEMTIME systemTime;
+ if (!FileTimeToSystemTime(&fileTime, &systemTime)) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "FileTimeToSystemTime() failed");
+ return NULL;
+ }
+
+ // Format SYSTEMTIME into ISO 8601 format (YYYY-MM-DDTHH:MM:SS.sssZ)
+ q.len = swprintf(q.buffer, q.size,
+ L"Event/System[TimeCreated[@SystemTime%ls\"%04d-%02d-%02dT%02d:%02d:%02d.%03dZ\"]",
+ lqs->rq.direction == FACETS_ANCHOR_DIRECTION_BACKWARD ? L"<=" : L">=",
+ systemTime.wYear, systemTime.wMonth, systemTime.wDay,
+ systemTime.wHour, systemTime.wMinute, systemTime.wSecond, systemTime.wMilliseconds);
+
+ if(lqs->rq.slice) {
+ b.len = snprintf(b.buffer, b.size, " and (");
+ if (facets_foreach_selected_value_in_key(
+ lqs->facets,
+ WEVT_FIELD_LEVEL,
+ sizeof(WEVT_FIELD_LEVEL) - 1,
+ used_hashes_registry,
+ wevt_foreach_selected_value_cb,
+ &b)) {
+ b.len += snprintf(&b.buffer[b.len], b.size - b.len, ")");
+ if (b.len < b.size) {
+ utf82unicode(&q.buffer[q.len], q.size - q.len, b.buffer);
+ q.len = wcslen(q.buffer);
+ }
+ }
+
+ b.len = snprintf(b.buffer, b.size, " and (");
+ if (facets_foreach_selected_value_in_key(
+ lqs->facets,
+ WEVT_FIELD_EVENTID,
+ sizeof(WEVT_FIELD_EVENTID) - 1,
+ used_hashes_registry,
+ wevt_foreach_selected_value_cb,
+ &b)) {
+ b.len += snprintf(&b.buffer[b.len], b.size - b.len, ")");
+ if (b.len < b.size) {
+ utf82unicode(&q.buffer[q.len], q.size - q.len, b.buffer);
+ q.len = wcslen(q.buffer);
+ }
+ }
+ }
+
+ q.len += swprintf(&q.buffer[q.len], q.size - q.len, L"]");
+
+ buffer_json_member_add_string(wb, "_query", channel2utf8(q.buffer));
+
+ return q.buffer;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// query with XPath
+
diff --git a/src/collectors/windows-events.plugin/windows-events-query-builder.h b/src/collectors/windows-events.plugin/windows-events-query-builder.h
new file mode 100644
index 000000000..80136e0aa
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-query-builder.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WINDOWS_EVENTS_QUERY_BUILDER_H
+#define NETDATA_WINDOWS_EVENTS_QUERY_BUILDER_H
+
+#include "windows-events.h"
+
+wchar_t *wevt_generate_query_no_xpath(LOGS_QUERY_STATUS *lqs, BUFFER *wb);
+
+#endif //NETDATA_WINDOWS_EVENTS_QUERY_BUILDER_H
diff --git a/src/collectors/windows-events.plugin/windows-events-query-evt-variant.c b/src/collectors/windows-events.plugin/windows-events-query-evt-variant.c
new file mode 100644
index 000000000..ee3aa382b
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-query-evt-variant.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows-events.h"
+#include <sddl.h> // For SID string conversion
+
+// Function to append the separator if the buffer is not empty
+static inline void append_separator_if_needed(BUFFER *b, const char *separator) {
+ if (buffer_strlen(b) > 0 && separator != NULL)
+ buffer_strcat(b, separator);
+}
+
+// Helper function to convert UTF16 strings to UTF8 and append to the buffer
+static inline void append_utf16(BUFFER *b, LPCWSTR utf16Str, const char *separator) {
+ if (!utf16Str || !*utf16Str) return;
+
+ append_separator_if_needed(b, separator);
+
+ size_t remaining = b->size - b->len;
+ if(remaining < 128) {
+ buffer_need_bytes(b, 128);
+ remaining = b->size - b->len;
+ }
+
+ bool truncated = false;
+ size_t used = utf16_to_utf8(&b->buffer[b->len], remaining, utf16Str, -1, &truncated);
+ if(truncated) {
+ // we need to resize
+ size_t needed = utf16_to_utf8(NULL, 0, utf16Str, -1, NULL); // find the size needed
+ buffer_need_bytes(b, needed);
+ remaining = b->size - b->len;
+ used = utf16_to_utf8(&b->buffer[b->len], remaining, utf16Str, -1, NULL);
+ }
+
+ if(used) {
+ b->len += used - 1;
+
+ internal_fatal(buffer_strlen(b) != strlen(buffer_tostring(b)),
+ "Buffer length mismatch.");
+ }
+}
+
+// Function to append binary data to the buffer
+static inline void append_binary(BUFFER *b, PBYTE data, DWORD size, const char *separator) {
+ if (data == NULL || size == 0) return;
+
+ append_separator_if_needed(b, separator);
+
+ buffer_need_bytes(b, size * 4);
+ for (DWORD i = 0; i < size; i++) {
+ uint8_t value = data[i];
+ b->buffer[b->len++] = hex_digits[(value & 0xf0) >> 4];
+ b->buffer[b->len++] = hex_digits[(value & 0x0f)];
+ }
+}
+
+// Function to append size_t to the buffer
+static inline void append_size_t(BUFFER *b, size_t size, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_uint64(b, size);
+}
+
+// Function to append HexInt32 in hexadecimal format
+static inline void append_uint32_hex(BUFFER *b, UINT32 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_uint64_hex(b, n);
+}
+
+// Function to append HexInt64 in hexadecimal format
+static inline void append_uint64_hex(BUFFER *b, UINT64 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_uint64_hex(b, n);
+}
+
+// Function to append various data types to the buffer
+static inline void append_uint64(BUFFER *b, UINT64 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_uint64(b, n);
+}
+
+static inline void append_int64(BUFFER *b, INT64 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_int64(b, n);
+}
+
+static inline void append_double(BUFFER *b, double n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_netdata_double(b, n);
+}
+
+static inline void append_guid(BUFFER *b, GUID *guid, const char *separator) {
+ fatal_assert(sizeof(GUID) == sizeof(nd_uuid_t));
+
+ append_separator_if_needed(b, separator);
+
+ ND_UUID *uuid = (ND_UUID *)guid;
+ buffer_need_bytes(b, UUID_STR_LEN);
+ uuid_unparse_lower(uuid->uuid, &b->buffer[b->len]);
+ b->len += UUID_STR_LEN - 1;
+
+ internal_fatal(buffer_strlen(b) != strlen(buffer_tostring(b)),
+ "Buffer length mismatch.");
+}
+
+static inline void append_systime(BUFFER *b, SYSTEMTIME *st, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_sprintf(b, "%04d-%02d-%02d %02d:%02d:%02d",
+ st->wYear, st->wMonth, st->wDay, st->wHour, st->wMinute, st->wSecond);
+}
+
+static inline void append_filetime(BUFFER *b, FILETIME *ft, const char *separator) {
+ SYSTEMTIME st;
+ if (FileTimeToSystemTime(ft, &st))
+ append_systime(b, &st, separator);
+}
+
+static inline void append_sid(BUFFER *b, PSID sid, const char *separator) {
+ cached_sid_to_buffer_append(sid, b, separator);
+}
+
+static inline void append_sbyte(BUFFER *b, INT8 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_int64(b, n);
+}
+
+static inline void append_byte(BUFFER *b, UINT8 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_uint64(b, n);
+}
+
+static inline void append_int16(BUFFER *b, INT16 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_int64(b, n);
+}
+
+static inline void append_uint16(BUFFER *b, UINT16 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_uint64(b, n);
+}
+
+static inline void append_int32(BUFFER *b, INT32 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_int64(b, n);
+}
+
+static inline void append_uint32(BUFFER *b, UINT32 n, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_uint64(b, n);
+}
+
+// Function to append EVT_HANDLE to the buffer
+static inline void append_evt_handle(BUFFER *b, EVT_HANDLE h, const char *separator) {
+ append_separator_if_needed(b, separator);
+ buffer_print_uint64_hex(b, (uintptr_t)h);
+}
+
+// Function to append XML data (UTF-16) to the buffer
+static inline void append_evt_xml(BUFFER *b, LPCWSTR xmlData, const char *separator) {
+ append_utf16(b, xmlData, separator); // XML data is essentially UTF-16 string
+}
+
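+// Renders any EVT_VARIANT value as text, appending it to the buffer `b`.
+// Array variants are expanded element by element, with `separator` inserted between
+// consecutive values (and between the new value and any text already in the buffer).
+// Null and unknown variant types are silently skipped.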
+void evt_variant_to_buffer(BUFFER *b, EVT_VARIANT *ev, const char *separator) {
+ if(ev->Type == EvtVarTypeNull) return;
+
+ if (ev->Type & EVT_VARIANT_TYPE_ARRAY) {
+ for (DWORD i = 0; i < ev->Count; i++) {
+ switch (ev->Type & EVT_VARIANT_TYPE_MASK) {
+ case EvtVarTypeString:
+ append_utf16(b, ev->StringArr[i], separator);
+ break;
+
+ case EvtVarTypeAnsiString:
+ if (ev->AnsiStringArr[i] != NULL) {
+ append_utf16(b, (LPCWSTR)ev->AnsiStringArr[i], separator);
+ }
+ break;
+
+ case EvtVarTypeSByte:
+ append_sbyte(b, ev->SByteArr[i], separator);
+ break;
+
+ case EvtVarTypeByte:
+ append_byte(b, ev->ByteArr[i], separator);
+ break;
+
+ case EvtVarTypeInt16:
+ append_int16(b, ev->Int16Arr[i], separator);
+ break;
+
+ case EvtVarTypeUInt16:
+ append_uint16(b, ev->UInt16Arr[i], separator);
+ break;
+
+ case EvtVarTypeInt32:
+ append_int32(b, ev->Int32Arr[i], separator);
+ break;
+
+ case EvtVarTypeUInt32:
+ append_uint32(b, ev->UInt32Arr[i], separator);
+ break;
+
+ case EvtVarTypeInt64:
+ append_int64(b, ev->Int64Arr[i], separator);
+ break;
+
+ case EvtVarTypeUInt64:
+ append_uint64(b, ev->UInt64Arr[i], separator);
+ break;
+
+ case EvtVarTypeSingle:
+ append_double(b, ev->SingleArr[i], separator);
+ break;
+
+ case EvtVarTypeDouble:
+ append_double(b, ev->DoubleArr[i], separator);
+ break;
+
+ case EvtVarTypeGuid:
+ append_guid(b, &ev->GuidArr[i], separator);
+ break;
+
+ case EvtVarTypeFileTime:
+ append_filetime(b, &ev->FileTimeArr[i], separator);
+ break;
+
+ case EvtVarTypeSysTime:
+ append_systime(b, &ev->SysTimeArr[i], separator);
+ break;
+
+ case EvtVarTypeSid:
+ append_sid(b, ev->SidArr[i], separator);
+ break;
+
+ case EvtVarTypeBinary:
+ append_binary(b, ev->BinaryVal, ev->Count, separator);
+ break;
+
+ case EvtVarTypeSizeT:
+ append_size_t(b, ev->SizeTArr[i], separator);
+ break;
+
+ case EvtVarTypeHexInt32:
+ append_uint32_hex(b, ev->UInt32Arr[i], separator);
+ break;
+
+ case EvtVarTypeHexInt64:
+ append_uint64_hex(b, ev->UInt64Arr[i], separator);
+ break;
+
+ case EvtVarTypeEvtHandle:
+ append_evt_handle(b, ev->EvtHandleVal, separator);
+ break;
+
+ case EvtVarTypeEvtXml:
+ append_evt_xml(b, ev->XmlValArr[i], separator);
+ break;
+
+ default:
+ // Skip unknown array types
+ break;
+ }
+ }
+ } else {
+ switch (ev->Type & EVT_VARIANT_TYPE_MASK) {
+ case EvtVarTypeNull:
+ // Do nothing for null types
+ break;
+
+ case EvtVarTypeString:
+ append_utf16(b, ev->StringVal, separator);
+ break;
+
+ case EvtVarTypeAnsiString:
+ append_utf16(b, (LPCWSTR)ev->AnsiStringVal, separator);
+ break;
+
+ case EvtVarTypeSByte:
+ append_sbyte(b, ev->SByteVal, separator);
+ break;
+
+ case EvtVarTypeByte:
+ append_byte(b, ev->ByteVal, separator);
+ break;
+
+ case EvtVarTypeInt16:
+ append_int16(b, ev->Int16Val, separator);
+ break;
+
+ case EvtVarTypeUInt16:
+ append_uint16(b, ev->UInt16Val, separator);
+ break;
+
+ case EvtVarTypeInt32:
+ append_int32(b, ev->Int32Val, separator);
+ break;
+
+ case EvtVarTypeUInt32:
+ append_uint32(b, ev->UInt32Val, separator);
+ break;
+
+ case EvtVarTypeInt64:
+ append_int64(b, ev->Int64Val, separator);
+ break;
+
+ case EvtVarTypeUInt64:
+ append_uint64(b, ev->UInt64Val, separator);
+ break;
+
+ case EvtVarTypeSingle:
+ append_double(b, ev->SingleVal, separator);
+ break;
+
+ case EvtVarTypeDouble:
+ append_double(b, ev->DoubleVal, separator);
+ break;
+
+ case EvtVarTypeBoolean:
+ append_separator_if_needed(b, separator);
+ buffer_strcat(b, ev->BooleanVal ? "true" : "false");
+ break;
+
+ case EvtVarTypeGuid:
+ append_guid(b, ev->GuidVal, separator);
+ break;
+
+ case EvtVarTypeBinary:
+ append_binary(b, ev->BinaryVal, ev->Count, separator);
+ break;
+
+ case EvtVarTypeSizeT:
+ append_size_t(b, ev->SizeTVal, separator);
+ break;
+
+ case EvtVarTypeHexInt32:
+ append_uint32_hex(b, ev->UInt32Val, separator);
+ break;
+
+ case EvtVarTypeHexInt64:
+ append_uint64_hex(b, ev->UInt64Val, separator);
+ break;
+
+ case EvtVarTypeEvtHandle:
+ append_evt_handle(b, ev->EvtHandleVal, separator);
+ break;
+
+ case EvtVarTypeEvtXml:
+ append_evt_xml(b, ev->XmlVal, separator);
+ break;
+
+ default:
+ // Skip unknown types
+ break;
+ }
+ }
+}
diff --git a/src/collectors/windows-events.plugin/windows-events-query.c b/src/collectors/windows-events.plugin/windows-events-query.c
new file mode 100644
index 000000000..fefa72829
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-query.c
@@ -0,0 +1,717 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows-events.h"
+
+static void wevt_event_done(WEVT_LOG *log);
+
+static uint64_t wevt_log_file_size(const wchar_t *channel);
+
+// --------------------------------------------------------------------------------------------------------------------
+
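+// Returns the extended status of the last Event Log API call as UTF-8.
+// The returned pointer refers to a thread-local buffer: it is only valid until
+// the next call on the same thread and must not be freed by the caller.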
+static const char *EvtGetExtendedStatus_utf8(void) {
+ static __thread wchar_t wbuf[4096];
+ static __thread char buf[4096];
+ DWORD wbuf_used = 0;
+
+ if(EvtGetExtendedStatus(sizeof(wbuf) / sizeof(wchar_t), wbuf, &wbuf_used) == ERROR_SUCCESS) {
+ wbuf[sizeof(wbuf) / sizeof(wchar_t) - 1] = 0;
+ unicode2utf8(buf, sizeof(buf), wbuf);
+ }
+ else
+ buf[0] = '\0';
+
+    // EvtGetExtendedStatus() may succeed but still return an empty message
+ if(!buf[0])
+ strncpyz(buf, "no additional information", sizeof(buf) - 1);
+
+ return buf;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
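+// Renders a message into 'dst' using the usual Event Log API size negotiation:
+// try with the current buffer, and on ERROR_INSUFFICIENT_BUFFER grow it to the
+// size reported by EvtFormatMessage() and retry.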
+bool EvtFormatMessage_utf16(
+ TXT_UTF16 *dst, EVT_HANDLE hMetadata, EVT_HANDLE hEvent, DWORD dwMessageId, EVT_FORMAT_MESSAGE_FLAGS flags) {
+ dst->used = 0;
+
+ DWORD size = 0;
+ if(!dst->data) {
+ EvtFormatMessage(hMetadata, hEvent, dwMessageId, 0, NULL, flags, 0, NULL, &size);
+ if(!size) {
+ // nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtFormatMessage() to get message size failed.");
+ goto cleanup;
+ }
+ txt_utf16_resize(dst, size, false);
+ }
+
+ // First, try to get the message using the existing buffer
+ if (!EvtFormatMessage(hMetadata, hEvent, dwMessageId, 0, NULL, flags, dst->size, dst->data, &size) || !dst->data) {
+ if (dst->data && GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+ // nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtFormatMessage() failed.");
+ goto cleanup;
+ }
+
+ // Try again with the resized buffer
+ txt_utf16_resize(dst, size, false);
+ if (!EvtFormatMessage(hMetadata, hEvent, dwMessageId, 0, NULL, flags, dst->size, dst->data, &size)) {
+ // nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtFormatMessage() failed after resizing buffer.");
+ goto cleanup;
+ }
+ }
+
+ // make sure it is null terminated
+ if(size <= dst->size)
+ dst->data[size - 1] = 0;
+ else
+ dst->data[dst->size - 1] = 0;
+
+ // unfortunately we have to calculate the length every time
+ // the size returned may not be the length of the dst string
+ dst->used = wcslen(dst->data) + 1;
+
+ return true;
+
+cleanup:
+ dst->used = 0;
+ return false;
+}
+
+static bool EvtFormatMessage_utf8(
+ TXT_UTF16 *tmp, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent,
+ TXT_UTF8 *dst, EVT_FORMAT_MESSAGE_FLAGS flags) {
+
+ dst->src = TXT_SOURCE_EVENT_LOG;
+
+ if(EvtFormatMessage_utf16(tmp, provider_handle(p), hEvent, 0, flags))
+ return txt_utf16_to_utf8(dst, tmp);
+
+ txt_utf8_empty(dst);
+ return false;
+}
+
+bool EvtFormatMessage_Event_utf8(TXT_UTF16 *tmp, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, TXT_UTF8 *dst) {
+ return EvtFormatMessage_utf8(tmp, p, hEvent, dst, EvtFormatMessageEvent);
+}
+
+bool EvtFormatMessage_Xml_utf8(TXT_UTF16 *tmp, PROVIDER_META_HANDLE *p, EVT_HANDLE hEvent, TXT_UTF8 *dst) {
+ return EvtFormatMessage_utf8(tmp, p, hEvent, dst, EvtFormatMessageXml);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+static void wevt_get_field_from_cache(
+ WEVT_LOG *log, uint64_t value, PROVIDER_META_HANDLE *h,
+ TXT_UTF8 *dst, const ND_UUID *provider,
+ WEVT_FIELD_TYPE cache_type, EVT_FORMAT_MESSAGE_FLAGS flags) {
+
+ if (field_cache_get(cache_type, provider, value, dst))
+ return;
+
+ EvtFormatMessage_utf8(&log->ops.unicode, h, log->hEvent, dst, flags);
+ field_cache_set(cache_type, provider, value, dst);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Level
+
+#define SET_LEN_AND_RETURN(constant) *len = sizeof(constant) - 1; return constant
+
+static inline const char *wevt_level_hardcoded(uint64_t level, size_t *len) {
+ switch(level) {
+ case WEVT_LEVEL_NONE: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_NONE);
+ case WEVT_LEVEL_CRITICAL: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_CRITICAL);
+ case WEVT_LEVEL_ERROR: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_ERROR);
+ case WEVT_LEVEL_WARNING: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_WARNING);
+ case WEVT_LEVEL_INFORMATION: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_INFORMATION);
+ case WEVT_LEVEL_VERBOSE: SET_LEN_AND_RETURN(WEVT_LEVEL_NAME_VERBOSE);
+ default: *len = 0; return NULL;
+ }
+}
+
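+// Resolves the human readable level name: hardcoded names for the OS-reserved
+// range, the provider manifest for provider-defined values, the per-provider
+// field cache (backed by EvtFormatMessage()) otherwise, and a numeric fallback
+// (WEVT_PREFIX_LEVEL plus the raw value) when everything else comes up empty.
+// wevt_get_opcode() and wevt_get_task() below follow the same strategy.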
+static void wevt_get_level(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) {
+ TXT_UTF8 *dst = &log->ops.level;
+ uint64_t value = ev->level;
+
+ txt_utf8_empty(dst);
+
+ EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageLevel;
+ WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_LEVEL;
+ bool is_provider = is_valid_provider_level(value, true);
+
+ if(!is_provider) {
+ size_t len;
+ const char *hardcoded = wevt_level_hardcoded(value, &len);
+ if(hardcoded) {
+ txt_utf8_set(dst, hardcoded, len);
+ dst->src = TXT_SOURCE_HARDCODED;
+ }
+ else {
+ // since this is not a provider value
+ // we expect to get the system description of it
+ wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags);
+ }
+ }
+ else if (!provider_get_level(dst, h, value)) {
+ // not found in the manifest, get it from the cache
+ wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags);
+ }
+
+ txt_utf8_set_numeric_if_empty(
+ dst, WEVT_PREFIX_LEVEL, sizeof(WEVT_PREFIX_LEVEL) - 1, ev->level);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Opcode
+
+static inline const char *wevt_opcode_hardcoded(uint64_t opcode, size_t *len) {
+ switch(opcode) {
+ case WEVT_OPCODE_INFO: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_INFO);
+ case WEVT_OPCODE_START: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_START);
+ case WEVT_OPCODE_STOP: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_STOP);
+ case WEVT_OPCODE_DC_START: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_DC_START);
+ case WEVT_OPCODE_DC_STOP: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_DC_STOP);
+ case WEVT_OPCODE_EXTENSION: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_EXTENSION);
+ case WEVT_OPCODE_REPLY: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_REPLY);
+ case WEVT_OPCODE_RESUME: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_RESUME);
+ case WEVT_OPCODE_SUSPEND: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_SUSPEND);
+ case WEVT_OPCODE_SEND: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_SEND);
+ case WEVT_OPCODE_RECEIVE: SET_LEN_AND_RETURN(WEVT_OPCODE_NAME_RECEIVE);
+ default: *len = 0; return NULL;
+ }
+}
+
+static void wevt_get_opcode(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) {
+ TXT_UTF8 *dst = &log->ops.opcode;
+ uint64_t value = ev->opcode;
+
+ txt_utf8_empty(dst);
+
+ EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageOpcode;
+ WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_OPCODE;
+ bool is_provider = is_valid_provider_opcode(value, true);
+
+ if(!is_provider) {
+ size_t len;
+ const char *hardcoded = wevt_opcode_hardcoded(value, &len);
+ if(hardcoded) {
+ txt_utf8_set(dst, hardcoded, len);
+ dst->src = TXT_SOURCE_HARDCODED;
+ }
+ else {
+ // since this is not a provider value
+ // we expect to get the system description of it
+ wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags);
+ }
+ }
+ else if (!provider_get_opcode(dst, h, value)) {
+ // not found in the manifest, get it from the cache
+ wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags);
+ }
+
+ txt_utf8_set_numeric_if_empty(
+ dst, WEVT_PREFIX_OPCODE, sizeof(WEVT_PREFIX_OPCODE) - 1, ev->opcode);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Task
+
+static const char *wevt_task_hardcoded(uint64_t task, size_t *len) {
+ switch(task) {
+ case WEVT_TASK_NONE: SET_LEN_AND_RETURN(WEVT_TASK_NAME_NONE);
+ default: *len = 0; return NULL;
+ }
+}
+
+static void wevt_get_task(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) {
+ TXT_UTF8 *dst = &log->ops.task;
+ uint64_t value = ev->task;
+
+ txt_utf8_empty(dst);
+
+ EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageTask;
+ WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_TASK;
+ bool is_provider = is_valid_provider_task(value, true);
+
+ if(!is_provider) {
+ size_t len;
+ const char *hardcoded = wevt_task_hardcoded(value, &len);
+ if(hardcoded) {
+ txt_utf8_set(dst, hardcoded, len);
+ dst->src = TXT_SOURCE_HARDCODED;
+ }
+ else {
+ // since this is not a provider value
+ // we expect to get the system description of it
+ wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags);
+ }
+ }
+ else if (!provider_get_task(dst, h, value)) {
+ // not found in the manifest, get it from the cache
+ wevt_get_field_from_cache(log, value, h, dst, &ev->provider, cache_type, flags);
+ }
+
+ txt_utf8_set_numeric_if_empty(
+ dst, WEVT_PREFIX_TASK, sizeof(WEVT_PREFIX_TASK) - 1, ev->task);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Keyword
+
+#define SET_BITS(msk, txt) { .mask = msk, .name = txt, .len = sizeof(txt) - 1, }
+
+static uint64_t wevt_keyword_handle_reserved(uint64_t value, TXT_UTF8 *dst) {
+ struct {
+ uint64_t mask;
+ const char *name;
+ size_t len;
+ } bits[] = {
+ SET_BITS(WEVT_KEYWORD_EVENTLOG_CLASSIC, WEVT_KEYWORD_NAME_EVENTLOG_CLASSIC),
+ SET_BITS(WEVT_KEYWORD_CORRELATION_HINT, WEVT_KEYWORD_NAME_CORRELATION_HINT),
+ SET_BITS(WEVT_KEYWORD_AUDIT_SUCCESS, WEVT_KEYWORD_NAME_AUDIT_SUCCESS),
+ SET_BITS(WEVT_KEYWORD_AUDIT_FAILURE, WEVT_KEYWORD_NAME_AUDIT_FAILURE),
+ SET_BITS(WEVT_KEYWORD_SQM, WEVT_KEYWORD_NAME_SQM),
+ SET_BITS(WEVT_KEYWORD_WDI_DIAG, WEVT_KEYWORD_NAME_WDI_DIAG),
+ SET_BITS(WEVT_KEYWORD_WDI_CONTEXT, WEVT_KEYWORD_NAME_WDI_CONTEXT),
+ SET_BITS(WEVT_KEYWORD_RESPONSE_TIME, WEVT_KEYWORD_NAME_RESPONSE_TIME),
+ };
+
+ txt_utf8_empty(dst);
+
+ for(size_t i = 0; i < sizeof(bits) / sizeof(bits[0]) ;i++) {
+ if((value & bits[i].mask) == bits[i].mask) {
+ txt_utf8_add_keywords_separator_if_needed(dst);
+ txt_utf8_append(dst, bits[i].name, bits[i].len);
+ value &= ~(bits[i].mask);
+ dst->src = TXT_SOURCE_HARDCODED;
+ }
+ }
+
+ // return it without any remaining reserved bits
+ return value & 0x0000FFFFFFFFFFFF;
+}
+
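+// Keywords are a bitmask: the reserved (top) bits are mapped to hardcoded
+// names, the remaining bits are resolved through the provider manifest, and
+// the field cache / EvtFormatMessage() is used only when the manifest yields
+// nothing. A hex fallback (WEVT_PREFIX_KEYWORDS) is emitted when no name is found.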
+static void wevt_get_keyword(WEVT_LOG *log, WEVT_EVENT *ev, PROVIDER_META_HANDLE *h) {
+ TXT_UTF8 *dst = &log->ops.keywords;
+
+ if(ev->keywords == WEVT_KEYWORD_NONE) {
+ txt_utf8_set(dst, WEVT_KEYWORD_NAME_NONE, sizeof(WEVT_KEYWORD_NAME_NONE) - 1);
+ dst->src = TXT_SOURCE_HARDCODED;
+ }
+
+ uint64_t value = wevt_keyword_handle_reserved(ev->keywords, dst);
+
+ EVT_FORMAT_MESSAGE_FLAGS flags = EvtFormatMessageKeyword;
+ WEVT_FIELD_TYPE cache_type = WEVT_FIELD_TYPE_KEYWORD;
+
+ if(!value && dst->used <= 1) {
+ // no hardcoded info in the buffer, make it None
+ txt_utf8_set(dst, WEVT_KEYWORD_NAME_NONE, sizeof(WEVT_KEYWORD_NAME_NONE) - 1);
+ dst->src = TXT_SOURCE_HARDCODED;
+ }
+ else if (value && !provider_get_keywords(dst, h, value) && dst->used <= 1) {
+        // the provider did not return any info and the description is still empty.
+        // the system resolves only one keyword (the highest bit set), not a list,
+        // so, when we call the system, we pass the original value (ev->keywords)
+ wevt_get_field_from_cache(log, ev->keywords, h, dst, &ev->provider, cache_type, flags);
+ }
+
+ txt_utf8_set_hex_if_empty(
+ dst, WEVT_PREFIX_KEYWORDS, sizeof(WEVT_PREFIX_KEYWORDS) - 1, ev->keywords);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Fetching Events
+
+static inline bool wEvtRender(WEVT_LOG *log, EVT_HANDLE context, WEVT_VARIANT *raw) {
+ DWORD bytes_used = 0, property_count = 0;
+ if (!EvtRender(context, log->hEvent, EvtRenderEventValues, raw->size, raw->data, &bytes_used, &property_count)) {
+ // information exceeds the allocated space
+ if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR,
+ "EvtRender() failed, hRenderSystemContext: 0x%lx, hEvent: 0x%lx, content: 0x%lx, size: %u, extended info: %s",
+ (uintptr_t)context, (uintptr_t)log->hEvent, (uintptr_t)raw->data, raw->size,
+ EvtGetExtendedStatus_utf8());
+ return false;
+ }
+
+ wevt_variant_resize(raw, bytes_used);
+ if (!EvtRender(context, log->hEvent, EvtRenderEventValues, raw->size, raw->data, &bytes_used, &property_count)) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR,
+ "EvtRender() failed, after bytes_used increase, extended info: %s",
+ EvtGetExtendedStatus_utf8());
+ return false;
+ }
+ }
+ raw->used = bytes_used;
+ raw->count = property_count;
+
+ return true;
+}
+
+static bool wevt_get_next_event_one(WEVT_LOG *log, WEVT_EVENT *ev) {
+ bool ret = false;
+
+ if(!wEvtRender(log, log->hRenderSystemContext, &log->ops.raw.system))
+ goto cleanup;
+
+ EVT_VARIANT *content = log->ops.raw.system.data;
+
+ ev->id = wevt_field_get_uint64(&content[EvtSystemEventRecordId]);
+ ev->event_id = wevt_field_get_uint16(&content[EvtSystemEventID]);
+ ev->level = wevt_field_get_uint8(&content[EvtSystemLevel]);
+ ev->opcode = wevt_field_get_uint8(&content[EvtSystemOpcode]);
+ ev->keywords = wevt_field_get_uint64_hex(&content[EvtSystemKeywords]);
+ ev->version = wevt_field_get_uint8(&content[EvtSystemVersion]);
+ ev->task = wevt_field_get_uint16(&content[EvtSystemTask]);
+ ev->qualifiers = wevt_field_get_uint16(&content[EvtSystemQualifiers]);
+ ev->process_id = wevt_field_get_uint32(&content[EvtSystemProcessID]);
+ ev->thread_id = wevt_field_get_uint32(&content[EvtSystemThreadID]);
+ ev->created_ns = wevt_field_get_filetime_to_ns(&content[EvtSystemTimeCreated]);
+
+ if(log->type & WEVT_QUERY_EXTENDED) {
+ wevt_field_get_string_utf8(&content[EvtSystemChannel], &log->ops.channel);
+ wevt_field_get_string_utf8(&content[EvtSystemComputer], &log->ops.computer);
+ wevt_field_get_string_utf8(&content[EvtSystemProviderName], &log->ops.provider);
+ wevt_get_uuid_by_type(&content[EvtSystemProviderGuid], &ev->provider);
+ wevt_get_uuid_by_type(&content[EvtSystemActivityID], &ev->activity_id);
+ wevt_get_uuid_by_type(&content[EvtSystemRelatedActivityID], &ev->related_activity_id);
+ wevt_field_get_sid(&content[EvtSystemUserID], &log->ops.account, &log->ops.domain, &log->ops.sid);
+
+ PROVIDER_META_HANDLE *p = log->provider =
+ provider_get(ev->provider, content[EvtSystemProviderName].StringVal);
+
+ ev->platform = provider_get_platform(p);
+
+ wevt_get_level(log, ev, p);
+ wevt_get_task(log, ev, p);
+ wevt_get_opcode(log, ev, p);
+ wevt_get_keyword(log, ev, p);
+
+ if(log->type & WEVT_QUERY_EVENT_DATA && wEvtRender(log, log->hRenderUserContext, &log->ops.raw.user)) {
+#if (ON_FTS_PRELOAD_MESSAGE == 1)
+ EvtFormatMessage_Event_utf8(&log->ops.unicode, log->provider, log->hEvent, &log->ops.event);
+#endif
+#if (ON_FTS_PRELOAD_XML == 1)
+ EvtFormatMessage_Xml_utf8(&log->ops.unicode, log->provider, log->hEvent, &log->ops.xml);
+#endif
+#if (ON_FTS_PRELOAD_EVENT_DATA == 1)
+ for(size_t i = 0; i < log->ops.raw.user.count ;i++)
+ evt_variant_to_buffer(log->ops.event_data, &log->ops.raw.user.data[i], " ||| ");
+#endif
+ }
+ }
+
+ ret = true;
+
+cleanup:
+ return ret;
+}
+
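+// Fetches the next event, re-filling the batch when it is exhausted. Extended
+// queries pull up to BATCH_NEXT_EVENT handles per EvtNext() call; when a batch
+// request returns nothing (but more items may exist), the batch size is halved
+// and retried. The call gives up after 10 consecutive events that fail to render.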
+bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev) {
+ DWORD size = (log->type & WEVT_QUERY_EXTENDED) ? BATCH_NEXT_EVENT : 1;
+ DWORD max_failures = 10;
+
+ fatal_assert(log && log->hQuery && log->hRenderSystemContext);
+
+ while(max_failures > 0) {
+ if (log->batch.used >= log->batch.size) {
+ log->batch.size = 0;
+ log->batch.used = 0;
+ DWORD err;
+ if(!EvtNext(log->hQuery, size, log->batch.hEvents, INFINITE, 0, &log->batch.size)) {
+ err = GetLastError();
+ if(err == ERROR_NO_MORE_ITEMS)
+ return false; // no data available, return failure
+ }
+
+ if(!log->batch.size) {
+ if(size == 1) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR,
+ "EvtNext() failed, hQuery: 0x%lx, size: %zu, extended info: %s",
+ (uintptr_t)log->hQuery, (size_t)size, EvtGetExtendedStatus_utf8());
+ return false;
+ }
+
+                // EvtNext() returns true only when it can fill the array,
+                // so let's retry with a smaller array.
+ size /= 2;
+ if(size < 1) size = 1;
+ continue;
+ }
+ }
+
+ log->query_stats.event_count++;
+ log->log_stats.event_count++;
+
+ // cleanup any previous event data
+ wevt_event_done(log);
+
+ log->hEvent = log->batch.hEvents[log->batch.used];
+ log->batch.hEvents[log->batch.used] = NULL;
+ log->batch.used++;
+
+ if(wevt_get_next_event_one(log, ev))
+ return true;
+ else {
+ log->query_stats.failed_count++;
+ log->log_stats.failed_count++;
+ max_failures--;
+ }
+ }
+
+ return false;
+}
+
+static void wevt_event_done(WEVT_LOG *log) {
+ if (log->provider) {
+ provider_release(log->provider);
+ log->provider = NULL;
+ }
+
+ if (log->hEvent) {
+ EvtClose(log->hEvent);
+ log->hEvent = NULL;
+ }
+
+ log->ops.channel.src = TXT_SOURCE_UNKNOWN;
+ log->ops.provider.src = TXT_SOURCE_UNKNOWN;
+ log->ops.computer.src = TXT_SOURCE_UNKNOWN;
+ log->ops.account.src = TXT_SOURCE_UNKNOWN;
+ log->ops.domain.src = TXT_SOURCE_UNKNOWN;
+ log->ops.sid.src = TXT_SOURCE_UNKNOWN;
+
+ log->ops.event.src = TXT_SOURCE_UNKNOWN;
+ log->ops.level.src = TXT_SOURCE_UNKNOWN;
+ log->ops.keywords.src = TXT_SOURCE_UNKNOWN;
+ log->ops.opcode.src = TXT_SOURCE_UNKNOWN;
+ log->ops.task.src = TXT_SOURCE_UNKNOWN;
+ log->ops.xml.src = TXT_SOURCE_UNKNOWN;
+
+ log->ops.channel.used = 0;
+ log->ops.provider.used = 0;
+ log->ops.computer.used = 0;
+ log->ops.account.used = 0;
+ log->ops.domain.used = 0;
+ log->ops.sid.used = 0;
+
+ log->ops.event.used = 0;
+ log->ops.level.used = 0;
+ log->ops.keywords.used = 0;
+ log->ops.opcode.used = 0;
+ log->ops.task.used = 0;
+ log->ops.xml.used = 0;
+
+ if(log->ops.event_data)
+ log->ops.event_data->len = 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Query management
+
+bool wevt_query(WEVT_LOG *log, LPCWSTR channel, LPCWSTR query, EVT_QUERY_FLAGS direction) {
+ wevt_query_done(log);
+ log->log_stats.queries_count++;
+
+ EVT_HANDLE hQuery = EvtQuery(NULL, channel, query, EvtQueryChannelPath | (direction & (EvtQueryReverseDirection | EvtQueryForwardDirection)) | EvtQueryTolerateQueryErrors);
+ if (!hQuery) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() failed, query: %s | extended info: %s",
+ query2utf8(query), EvtGetExtendedStatus_utf8());
+
+ log->log_stats.queries_failed++;
+ return false;
+ }
+
+ log->hQuery = hQuery;
+ return true;
+}
+
+void wevt_query_done(WEVT_LOG *log) {
+ // close the last working hEvent
+ wevt_event_done(log);
+
+ // close all batched hEvents
+ for(DWORD i = log->batch.used; i < log->batch.size ;i++) {
+ if(log->batch.hEvents[i])
+ EvtClose(log->batch.hEvents[i]);
+
+ log->batch.hEvents[i] = NULL;
+ }
+ log->batch.used = 0;
+ log->batch.size = 0;
+
+ if (log->hQuery) {
+ EvtClose(log->hQuery);
+ log->hQuery = NULL;
+ }
+
+ log->query_stats.event_count = 0;
+ log->query_stats.failed_count = 0;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Log management
+
+WEVT_LOG *wevt_openlog6(WEVT_QUERY_TYPE type) {
+ WEVT_LOG *log = callocz(1, sizeof(*log));
+ log->type = type;
+
+    // create the system render context
+ log->hRenderSystemContext = EvtCreateRenderContext(0, NULL, EvtRenderContextSystem);
+ if (!log->hRenderSystemContext) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR,
+ "EvtCreateRenderContext() on system context failed, extended info: %s",
+ EvtGetExtendedStatus_utf8());
+ goto cleanup;
+ }
+
+ if(type & WEVT_QUERY_EVENT_DATA) {
+ log->hRenderUserContext = EvtCreateRenderContext(0, NULL, EvtRenderContextUser);
+ if (!log->hRenderUserContext) {
+            nd_log(NDLS_COLLECTORS, NDLP_ERR,
+                   "EvtCreateRenderContext() on user context failed, extended info: %s",
+                   EvtGetExtendedStatus_utf8());
+ goto cleanup;
+ }
+
+ log->ops.event_data = buffer_create(4096, NULL);
+ }
+
+ return log;
+
+cleanup:
+ wevt_closelog6(log);
+ return NULL;
+}
+
+void wevt_closelog6(WEVT_LOG *log) {
+ wevt_query_done(log);
+
+ if (log->hRenderSystemContext)
+ EvtClose(log->hRenderSystemContext);
+
+ if (log->hRenderUserContext)
+ EvtClose(log->hRenderUserContext);
+
+ wevt_variant_cleanup(&log->ops.raw.system);
+ wevt_variant_cleanup(&log->ops.raw.user);
+ txt_utf16_cleanup(&log->ops.unicode);
+ txt_utf8_cleanup(&log->ops.channel);
+ txt_utf8_cleanup(&log->ops.provider);
+ txt_utf8_cleanup(&log->ops.computer);
+ txt_utf8_cleanup(&log->ops.account);
+ txt_utf8_cleanup(&log->ops.domain);
+ txt_utf8_cleanup(&log->ops.sid);
+
+ txt_utf8_cleanup(&log->ops.event);
+ txt_utf8_cleanup(&log->ops.level);
+ txt_utf8_cleanup(&log->ops.keywords);
+ txt_utf8_cleanup(&log->ops.opcode);
+ txt_utf8_cleanup(&log->ops.task);
+ txt_utf8_cleanup(&log->ops.xml);
+
+ buffer_free(log->ops.event_data);
+
+ freez(log);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Retention
+
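+// Determines the retention of a channel: the first and last EventRecordIDs are
+// discovered by querying the channel forward and then in reverse, and from them
+// the number of entries, the covered time window and the log file size are derived.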
+bool wevt_channel_retention(WEVT_LOG *log, const wchar_t *channel, const wchar_t *query, EVT_RETENTION *retention) {
+ bool ret = false;
+
+ // get the number of the oldest record in the log
+ // "EvtGetLogInfo()" does not work properly with "EvtLogOldestRecordNumber"
+ // we have to get it from the first EventRecordID
+
+ // query the eventlog
+ log->hQuery = EvtQuery(NULL, channel, query, EvtQueryChannelPath | EvtQueryForwardDirection | EvtQueryTolerateQueryErrors);
+ if (!log->hQuery) {
+ if (GetLastError() == ERROR_EVT_CHANNEL_NOT_FOUND)
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention failed, channel '%s' not found, cannot get retention, extended info: %s",
+ channel2utf8(channel), EvtGetExtendedStatus_utf8());
+ else
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention on channel '%s' failed, cannot get retention, extended info: %s",
+ channel2utf8(channel), EvtGetExtendedStatus_utf8());
+
+ goto cleanup;
+ }
+
+ if (!wevt_get_next_event(log, &retention->first_event))
+ goto cleanup;
+
+ if (!retention->first_event.id) {
+ // no data in the event log
+ retention->first_event = retention->last_event = WEVT_EVENT_EMPTY;
+ ret = true;
+ goto cleanup;
+ }
+ EvtClose(log->hQuery);
+
+ log->hQuery = EvtQuery(NULL, channel, query, EvtQueryChannelPath | EvtQueryReverseDirection | EvtQueryTolerateQueryErrors);
+ if (!log->hQuery) {
+ if (GetLastError() == ERROR_EVT_CHANNEL_NOT_FOUND)
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention failed, channel '%s' not found, extended info: %s",
+ channel2utf8(channel), EvtGetExtendedStatus_utf8());
+ else
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtQuery() for retention on channel '%s' failed, extended info: %s",
+ channel2utf8(channel), EvtGetExtendedStatus_utf8());
+
+ goto cleanup;
+ }
+
+ if (!wevt_get_next_event(log, &retention->last_event) || retention->last_event.id == 0) {
+ // no data in eventlog
+ retention->last_event = retention->first_event;
+ }
+    retention->last_event.id += 1; // +1 so that the last record is included when reading
+ ret = true;
+
+cleanup:
+ wevt_query_done(log);
+
+ if(ret) {
+ retention->entries = (channel && !query) ? retention->last_event.id - retention->first_event.id : 0;
+
+ if(retention->last_event.created_ns >= retention->first_event.created_ns)
+ retention->duration_ns = retention->last_event.created_ns - retention->first_event.created_ns;
+ else
+ retention->duration_ns = retention->first_event.created_ns - retention->last_event.created_ns;
+
+ retention->size_bytes = wevt_log_file_size(channel);
+ }
+ else
+ memset(retention, 0, sizeof(*retention));
+
+ return ret;
+}
+
+static uint64_t wevt_log_file_size(const wchar_t *channel) {
+ EVT_HANDLE hLog = NULL;
+ EVT_VARIANT evtVariant;
+ DWORD bufferUsed = 0;
+ uint64_t file_size = 0;
+
+ // Open the event log channel
+ hLog = EvtOpenLog(NULL, channel, EvtOpenChannelPath);
+ if (!hLog) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtOpenLog() on channel '%s' failed, extended info: %s",
+ channel2utf8(channel), EvtGetExtendedStatus_utf8());
+ goto cleanup;
+ }
+
+ // Get the file size of the log
+ if (!EvtGetLogInfo(hLog, EvtLogFileSize, sizeof(evtVariant), &evtVariant, &bufferUsed)) {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR, "EvtGetLogInfo() on channel '%s' failed, extended info: %s",
+ channel2utf8(channel), EvtGetExtendedStatus_utf8());
+ goto cleanup;
+ }
+
+ // Extract the file size from the EVT_VARIANT structure
+ file_size = evtVariant.UInt64Val;
+
+cleanup:
+ if (hLog)
+ EvtClose(hLog);
+
+ return file_size;
+}
diff --git a/src/collectors/windows-events.plugin/windows-events-query.h b/src/collectors/windows-events.plugin/windows-events-query.h
new file mode 100644
index 000000000..3136b23df
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-query.h
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WINDOWS_EVENTS_QUERY_H
+#define NETDATA_WINDOWS_EVENTS_QUERY_H
+
+#include "libnetdata/libnetdata.h"
+#include "windows-events.h"
+
+#define BATCH_NEXT_EVENT 500
+
+typedef struct wevt_event {
+ uint64_t id; // EventRecordId (unique and sequential per channel)
+ uint8_t version;
+    uint8_t level;              // the severity of the event
+    uint8_t opcode;             // we receive this as 8-bit, but providers use 32-bit
+ uint16_t event_id; // This is the template that defines the message to be shown
+ uint16_t task;
+ uint16_t qualifiers;
+ uint32_t process_id;
+ uint32_t thread_id;
+ uint64_t keywords; // Categorization of the event
+ ND_UUID provider;
+ ND_UUID activity_id;
+ ND_UUID related_activity_id;
+ nsec_t created_ns;
+ WEVT_PROVIDER_PLATFORM platform;
+} WEVT_EVENT;
+
+#define WEVT_EVENT_EMPTY (WEVT_EVENT){ .id = 0, .created_ns = 0, }
+
+typedef struct {
+ EVT_VARIANT *data;
+ DWORD size;
+ DWORD used;
+ DWORD count;
+} WEVT_VARIANT;
+
+typedef struct {
+ WEVT_EVENT first_event;
+ WEVT_EVENT last_event;
+
+ uint64_t entries;
+ nsec_t duration_ns;
+ uint64_t size_bytes;
+} EVT_RETENTION;
+
+struct provider_meta_handle;
+
+typedef enum __attribute__((packed)) {
+ WEVT_QUERY_BASIC = (1 << 0),
+ WEVT_QUERY_EXTENDED = (1 << 1),
+ WEVT_QUERY_EVENT_DATA = (1 << 2),
+} WEVT_QUERY_TYPE;
+
+#define WEVT_QUERY_RETENTION WEVT_QUERY_BASIC
+#define WEVT_QUERY_NORMAL (WEVT_QUERY_BASIC | WEVT_QUERY_EXTENDED)
+#define WEVT_QUERY_FTS (WEVT_QUERY_BASIC | WEVT_QUERY_EXTENDED | WEVT_QUERY_EVENT_DATA)
+
+typedef struct wevt_log {
+ struct {
+ DWORD size;
+ DWORD used;
+ EVT_HANDLE hEvents[BATCH_NEXT_EVENT];
+ } batch;
+
+ EVT_HANDLE hEvent;
+ EVT_HANDLE hQuery;
+ EVT_HANDLE hRenderSystemContext;
+ EVT_HANDLE hRenderUserContext;
+ struct provider_meta_handle *provider;
+
+ WEVT_QUERY_TYPE type;
+
+ struct {
+ struct {
+ // temp buffer used for rendering event log messages
+ // never use directly
+ WEVT_VARIANT system;
+ WEVT_VARIANT user;
+ } raw;
+
+ // temp buffer used for fetching and converting UNICODE and UTF-8
+ // every string operation overwrites it, multiple times per event log entry
+ // it can be used within any function, for its own purposes,
+        // but it must never be shared across functions
+ TXT_UTF16 unicode;
+
+ // string attributes of the current event log entry
+        // valid until another event is fetched
+
+ // IMPORTANT:
+ // EVERY FIELD NEEDS ITS OWN BUFFER!
+ // the way facets work, all the field value pointers need to be valid
+        // until the entire row closes, so reusing a buffer across fields
+        // would make all of those fields report the same value.
+
+ TXT_UTF8 channel;
+ TXT_UTF8 provider;
+ TXT_UTF8 computer;
+ TXT_UTF8 account;
+ TXT_UTF8 domain;
+ TXT_UTF8 sid;
+
+ TXT_UTF8 event; // the message to be shown to the user
+ TXT_UTF8 level;
+ TXT_UTF8 keywords;
+ TXT_UTF8 opcode;
+ TXT_UTF8 task;
+ TXT_UTF8 xml;
+
+ BUFFER *event_data;
+ } ops;
+
+ struct {
+ size_t event_count;
+ size_t failed_count;
+ } query_stats;
+
+ struct {
+ size_t queries_count;
+ size_t queries_failed;
+
+ size_t event_count;
+ size_t failed_count;
+ } log_stats;
+
+} WEVT_LOG;
+
+WEVT_LOG *wevt_openlog6(WEVT_QUERY_TYPE type);
+void wevt_closelog6(WEVT_LOG *log);
+
+bool wevt_channel_retention(WEVT_LOG *log, const wchar_t *channel, const wchar_t *query, EVT_RETENTION *retention);
+
+bool wevt_query(WEVT_LOG *log, LPCWSTR channel, LPCWSTR query, EVT_QUERY_FLAGS direction);
+void wevt_query_done(WEVT_LOG *log);
+
+bool wevt_get_next_event(WEVT_LOG *log, WEVT_EVENT *ev);
+
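+// Typical usage (an illustrative sketch only; the channel name and flags below
+// are examples, not requirements):
+//
+//   WEVT_LOG *log = wevt_openlog6(WEVT_QUERY_NORMAL);
+//   if(log) {
+//       if(wevt_query(log, L"System", L"*", EvtQueryReverseDirection)) {
+//           WEVT_EVENT ev;
+//           while(wevt_get_next_event(log, &ev)) {
+//               // ev and log->ops.* describe the current entry,
+//               // valid until the next call
+//           }
+//           wevt_query_done(log);
+//       }
+//       wevt_closelog6(log);
+//   }
+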
+bool EvtFormatMessage_utf16(
+ TXT_UTF16 *dst, EVT_HANDLE hMetadata, EVT_HANDLE hEvent, DWORD dwMessageId, EVT_FORMAT_MESSAGE_FLAGS flags);
+
+bool EvtFormatMessage_Event_utf8(TXT_UTF16 *tmp, struct provider_meta_handle *p, EVT_HANDLE hEvent, TXT_UTF8 *dst);
+bool EvtFormatMessage_Xml_utf8(TXT_UTF16 *tmp, struct provider_meta_handle *p, EVT_HANDLE hEvent, TXT_UTF8 *dst);
+
+void evt_variant_to_buffer(BUFFER *b, EVT_VARIANT *ev, const char *separator);
+
+static inline void wevt_variant_cleanup(WEVT_VARIANT *v) {
+ freez(v->data);
+}
+
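+// Grow-only resize: the previous contents are discarded (callers are expected
+// to re-render into the buffer after resizing).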
+static inline void wevt_variant_resize(WEVT_VARIANT *v, size_t required_size) {
+ if(required_size < v->size)
+ return;
+
+ wevt_variant_cleanup(v);
+ v->size = txt_compute_new_size(v->size, required_size);
+ v->data = mallocz(v->size);
+}
+
+static inline void wevt_variant_count_from_used(WEVT_VARIANT *v) {
+ v->count = v->used / sizeof(*v->data);
+}
+
+static inline uint8_t wevt_field_get_uint8(EVT_VARIANT *ev) {
+ if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull)
+ return 0;
+
+ fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeByte);
+ return ev->ByteVal;
+}
+
+static inline uint16_t wevt_field_get_uint16(EVT_VARIANT *ev) {
+ if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull)
+ return 0;
+
+ fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeUInt16);
+ return ev->UInt16Val;
+}
+
+static inline uint32_t wevt_field_get_uint32(EVT_VARIANT *ev) {
+ if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull)
+ return 0;
+
+ fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeUInt32);
+ return ev->UInt32Val;
+}
+
+static inline uint64_t wevt_field_get_uint64(EVT_VARIANT *ev) {
+ if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull)
+ return 0;
+
+ fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeUInt64);
+ return ev->UInt64Val;
+}
+
+static inline uint64_t wevt_field_get_uint64_hex(EVT_VARIANT *ev) {
+ if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull)
+ return 0;
+
+ fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeHexInt64);
+ return ev->UInt64Val;
+}
+
+static inline bool wevt_field_get_string_utf8(EVT_VARIANT *ev, TXT_UTF8 *dst) {
+ if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) {
+ txt_utf8_empty(dst);
+ return false;
+ }
+
+ fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeString);
+ return wchar_to_txt_utf8(dst, ev->StringVal, -1);
+}
+
+bool cached_sid_to_account_domain_sidstr(PSID sid, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, TXT_UTF8 *dst_sid_str);
+static inline bool wevt_field_get_sid(EVT_VARIANT *ev, TXT_UTF8 *dst_account, TXT_UTF8 *dst_domain, TXT_UTF8 *dst_sid_str) {
+ if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) {
+ txt_utf8_empty(dst_account);
+ txt_utf8_empty(dst_domain);
+ txt_utf8_empty(dst_sid_str);
+ return false;
+ }
+
+ fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeSid);
+ return cached_sid_to_account_domain_sidstr(ev->SidVal, dst_account, dst_domain, dst_sid_str);
+}
+
+static inline uint64_t wevt_field_get_filetime_to_ns(EVT_VARIANT *ev) {
+ if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull)
+ return 0;
+
+ fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeFileTime);
+ return os_windows_ulonglong_to_unix_epoch_ns(ev->FileTimeVal);
+}
+
+static inline bool wevt_GUID_to_ND_UUID(ND_UUID *nd_uuid, const GUID *guid) {
+ if(guid && sizeof(GUID) == sizeof(ND_UUID)) {
+ memcpy(nd_uuid->uuid, guid, sizeof(ND_UUID));
+ return true;
+ }
+ else {
+ *nd_uuid = UUID_ZERO;
+ return false;
+ }
+}
+
+static inline bool wevt_get_uuid_by_type(EVT_VARIANT *ev, ND_UUID *dst) {
+ if((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeNull) {
+ wevt_GUID_to_ND_UUID(dst, NULL);
+ return false;
+ }
+
+ fatal_assert((ev->Type & EVT_VARIANT_TYPE_MASK) == EvtVarTypeGuid);
+ return wevt_GUID_to_ND_UUID(dst, ev->GuidVal);
+}
+
+// https://learn.microsoft.com/en-us/windows/win32/wes/defining-severity-levels
+static inline bool is_valid_provider_level(uint64_t level, bool strict) {
+ if(strict)
+ // when checking if the name is provider independent
+ return level >= 16 && level <= 255;
+ else
+ // when checking acceptable values in provider manifests
+ return level <= 255;
+}
+
+// https://learn.microsoft.com/en-us/windows/win32/wes/defining-tasks-and-opcodes
+static inline bool is_valid_provider_opcode(uint64_t opcode, bool strict) {
+ if(strict)
+ // when checking if the name is provider independent
+ return opcode >= 10 && opcode <= 239;
+ else
+ // when checking acceptable values in provider manifests
+ return opcode <= 255;
+}
+
+// https://learn.microsoft.com/en-us/windows/win32/wes/defining-tasks-and-opcodes
+static inline bool is_valid_provider_task(uint64_t task, bool strict) {
+ if(strict)
+ // when checking if the name is provider independent
+ return task > 0 && task <= 0xFFFF;
+ else
+ // when checking acceptable values in provider manifests
+ return task <= 0xFFFF;
+}
+
+// https://learn.microsoft.com/en-us/windows/win32/wes/defining-keywords-used-to-classify-types-of-events
+static inline bool is_valid_provider_keyword(uint64_t keyword, bool strict) {
+ if(strict)
+ // when checking if the name is provider independent
+ return keyword > 0 && keyword <= 0x0000FFFFFFFFFFFF;
+ else
+ // when checking acceptable values in provider manifests
+ return true;
+}
+
+#endif //NETDATA_WINDOWS_EVENTS_QUERY_H
diff --git a/src/collectors/windows-events.plugin/windows-events-sources.c b/src/collectors/windows-events.plugin/windows-events-sources.c
new file mode 100644
index 000000000..b931ed059
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-sources.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows-events.h"
+
+//struct {
+// const char *name;
+// const wchar_t *query;
+//} custom_queries[] = {
+// {
+// .name = "All-Administrative-Events",
+// .query = L"<QueryList>\n"
+// " <Query Id=\"0\" Path=\"Application\">\n"
+// " <Select Path=\"Application\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Security\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"System\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"HardwareEvents\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Internet Explorer\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Key Management Service\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-AppV-Client/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-AppV-Client/Virtual Applications\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-All-User-Install-Agent/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-AppHost/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Application Server-Applications/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-AppModel-Runtime/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-AppReadiness/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-AssignedAccess/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-AssignedAccessBroker/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Storage-ATAPort/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-BitLocker-DrivePreparationTool/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Client-Licensing-Platform/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-DataIntegrityScan/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-DataIntegrityScan/CrashRecovery\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-DSC/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-DeviceManagement-Enterprise-Diagnostics-Provider/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-DeviceManagement-Enterprise-Diagnostics-Provider/Autopilot\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-DeviceSetupManager/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Dhcp-Client/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Dhcpv6-Client/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Diagnosis-Scripted/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Storage-Disk/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-DxgKrnl-Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-EDP-Application-Learning/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-EDP-Audit-Regular/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-EDP-Audit-TCB/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Client-License-Flexible-Platform/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-GenericRoaming/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Hyper-V-Guest-Drivers/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Hyper-V-Hypervisor-Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Hyper-V-VID-Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Kernel-EventTracing/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-KeyboardFilter/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-ModernDeployment-Diagnostics-Provider/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-ModernDeployment-Diagnostics-Provider/Autopilot\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-ModernDeployment-Diagnostics-Provider/Diagnostics\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-ModernDeployment-Diagnostics-Provider/ManagementService\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-MUI/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-PowerShell/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-PrintBRM/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-PrintService/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Provisioning-Diagnostics-Provider/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Provisioning-Diagnostics-Provider/AutoPilot\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Provisioning-Diagnostics-Provider/ManagementService\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-PushNotification-Platform/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-RemoteApp and Desktop Connections/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-RemoteAssistance/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-RemoteDesktopServices-RdpCoreTS/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-RetailDemo/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-SecurityMitigationsBroker/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-SmartCard-TPM-VCard-Module/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-SMBDirect/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-SMBWitnessClient/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Storage-Tiering/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Storage-ClassPnP/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Storage-Storport/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-ClientUSBDevices/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-LocalSessionManager/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-PnPDevices/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-Printers/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-RemoteConnectionManager/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-ServerUSBDevices/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Troubleshooting-Recommended/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-User Device Registration/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-VerifyHardwareSecurity/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-WindowsBackup/ActionCenter\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Microsoft-Windows-Workplace Join/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"OAlerts\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"OneApp_IGCC\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"OpenSSH/Admin\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"USER_ESRV_SVC_QUEENCREEK\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Visual Studio\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " <Select Path=\"Windows PowerShell\">*[System[(Level=1 or Level=2 or Level=3)]]</Select>\n"
+// " </Query>\n"
+// "</QueryList>",
+// },
+// {
+// .name = "All-Remote-Desktop-Services",
+// .query = L"<QueryList>\n"
+// " <Query Id=\"0\" Path=\"Microsoft-Rdms-UI/Admin\">\n"
+// " <Select Path=\"Microsoft-Rdms-UI/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Rdms-UI/Operational\">*</Select>\n"
+// " <Select Path=\"Remote-Desktop-Management-Service/Admin\">*</Select>\n"
+// " <Select Path=\"Remote-Desktop-Management-Service/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-SessionBroker-Client/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-SessionBroker-Client/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-RemoteConnectionManager/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-RemoteConnectionManager/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-PnPDevices/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-PnPDevices/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-RemoteApp and Desktop Connections/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-RemoteApp and Desktop Connection Management/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-RemoteApp and Desktop Connection Management/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-SessionBroker/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-SessionBroker/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-TSV-VmHostAgent/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-TSV-VmHostAgent/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-ServerUSBDevices/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-ServerUSBDevices/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-LocalSessionManager/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-LocalSessionManager/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-ClientUSBDevices/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-ClientUSBDevices/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-RDPClient/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-Licensing/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-Licensing/Operational\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-Gateway/Admin\">*</Select>\n"
+// " <Select Path=\"Microsoft-Windows-TerminalServices-Gateway/Operational\">*</Select>\n"
+// " </Query>\n"
+// "</QueryList>",
+// },
+// {
+// .name = "All-Security-SPP",
+// .query = L"<QueryList>\n"
+// " <Query Id=\"0\" Path=\"Microsoft-Windows-HelloForBusiness/Operational\">\n"
+// " <Select Path=\"Microsoft-Windows-HelloForBusiness/Operational\">*[System[(Level&gt;5 )]]</Select>\n"
+// " </Query>\n"
+// "</QueryList>",
+// }
+//};
+
+ENUM_STR_MAP_DEFINE(WEVT_SOURCE_TYPE) = {
+ { .id = WEVTS_ALL, .name = WEVT_SOURCE_ALL_NAME },
+ { .id = WEVTS_ADMIN, .name = WEVT_SOURCE_ALL_ADMIN_NAME },
+ { .id = WEVTS_OPERATIONAL, .name = WEVT_SOURCE_ALL_OPERATIONAL_NAME },
+ { .id = WEVTS_ANALYTIC, .name = WEVT_SOURCE_ALL_ANALYTIC_NAME },
+ { .id = WEVTS_DEBUG, .name = WEVT_SOURCE_ALL_DEBUG_NAME },
+ { .id = WEVTS_WINDOWS, .name = WEVT_SOURCE_ALL_WINDOWS_NAME },
+ { .id = WEVTS_ENABLED, .name = WEVT_SOURCE_ALL_ENABLED_NAME },
+ { .id = WEVTS_DISABLED, .name = WEVT_SOURCE_ALL_DISABLED_NAME },
+ { .id = WEVTS_FORWARDED, .name = WEVT_SOURCE_ALL_FORWARDED_NAME },
+ { .id = WEVTS_CLASSIC, .name = WEVT_SOURCE_ALL_CLASSIC_NAME },
+ { .id = WEVTS_BACKUP_MODE, .name = WEVT_SOURCE_ALL_BACKUP_MODE_NAME },
+ { .id = WEVTS_OVERWRITE_MODE, .name = WEVT_SOURCE_ALL_OVERWRITE_MODE_NAME },
+ { .id = WEVTS_STOP_WHEN_FULL_MODE, .name = WEVT_SOURCE_ALL_STOP_WHEN_FULL_MODE_NAME },
+ { .id = WEVTS_RETAIN_AND_BACKUP_MODE, .name = WEVT_SOURCE_ALL_RETAIN_AND_BACKUP_MODE_NAME },
+
+ // terminator
+    { .id = 0, .name = NULL }
+};
+
+BITMAP_STR_DEFINE_FUNCTIONS(WEVT_SOURCE_TYPE, WEVTS_NONE, "");
+
+DICTIONARY *wevt_sources = NULL;
+DICTIONARY *used_hashes_registry = NULL;
+static usec_t wevt_session = 0;
+
+void wevt_sources_del_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ LOGS_QUERY_SOURCE *src = value;
+ freez((void *)src->fullname);
+ string_freez(src->source);
+
+ src->fullname = NULL;
+ src->source = NULL;
+}
+
+static bool wevt_sources_conflict_cb(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data __maybe_unused) {
+ LOGS_QUERY_SOURCE *src_old = old_value;
+ LOGS_QUERY_SOURCE *src_new = new_value;
+
+ bool ret = false;
+ if(src_new->last_scan_monotonic_ut > src_old->last_scan_monotonic_ut) {
+ src_old->last_scan_monotonic_ut = src_new->last_scan_monotonic_ut;
+
+ if (src_old->source != src_new->source) {
+ string_freez(src_old->source);
+ src_old->source = src_new->source;
+ src_new->source = NULL;
+ }
+ src_old->source_type = src_new->source_type;
+
+ src_old->msg_first_ut = src_new->msg_first_ut;
+ src_old->msg_last_ut = src_new->msg_last_ut;
+ src_old->msg_first_id = src_new->msg_first_id;
+ src_old->msg_last_id = src_new->msg_last_id;
+ src_old->entries = src_new->entries;
+ src_old->size = src_new->size;
+
+ ret = true;
+ }
+
+ freez((void *)src_new->fullname);
+ string_freez(src_new->source);
+ src_new->fullname = NULL;
+ src_new->source = NULL;
+
+ return ret;
+}
+
+void wevt_sources_init(void) {
+ wevt_session = now_realtime_usec();
+
+ used_hashes_registry = dictionary_create(DICT_OPTION_DONT_OVERWRITE_VALUE);
+
+ wevt_sources = dictionary_create_advanced(DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE,
+ NULL, sizeof(LOGS_QUERY_SOURCE));
+
+ dictionary_register_delete_callback(wevt_sources, wevt_sources_del_cb, NULL);
+ dictionary_register_conflict_callback(wevt_sources, wevt_sources_conflict_cb, NULL);
+}
+
+void buffer_json_wevt_versions(BUFFER *wb __maybe_unused) {
+ buffer_json_member_add_object(wb, "versions");
+ {
+ buffer_json_member_add_uint64(wb, "sources",
+ wevt_session + dictionary_version(wevt_sources));
+ }
+ buffer_json_object_close(wb);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+int wevt_sources_dict_items_backward_compar(const void *a, const void *b) {
+ const DICTIONARY_ITEM **da = (const DICTIONARY_ITEM **)a, **db = (const DICTIONARY_ITEM **)b;
+ LOGS_QUERY_SOURCE *sa = dictionary_acquired_item_value(*da);
+ LOGS_QUERY_SOURCE *sb = dictionary_acquired_item_value(*db);
+
+ // compare the last message timestamps
+ if(sa->msg_last_ut < sb->msg_last_ut)
+ return 1;
+
+ if(sa->msg_last_ut > sb->msg_last_ut)
+ return -1;
+
+ // compare the first message timestamps
+ if(sa->msg_first_ut < sb->msg_first_ut)
+ return 1;
+
+ if(sa->msg_first_ut > sb->msg_first_ut)
+ return -1;
+
+ return 0;
+}
+
+int wevt_sources_dict_items_forward_compar(const void *a, const void *b) {
+ return -wevt_sources_dict_items_backward_compar(a, b);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
+typedef enum {
+ wevt_source_type_internal,
+ wevt_source_type_provider,
+ wevt_source_type_channel,
+} wevt_source_type;
+
+struct wevt_source {
+ wevt_source_type type;
+ usec_t first_ut;
+ usec_t last_ut;
+ size_t count;
+ size_t entries;
+ uint64_t size;
+};
+
+static int wevt_source_to_json_array_cb(const DICTIONARY_ITEM *item, void *entry, void *data) {
+ const struct wevt_source *s = entry;
+ BUFFER *wb = data;
+
+ const char *name = dictionary_acquired_item_name(item);
+
+ if(s->count == 1 && strncmp(name, WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX, sizeof(WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX) - 1) == 0)
+ // do not include "All-Of-X" when there is only 1 channel
+ return 0;
+
+ bool default_selected = (s->type == wevt_source_type_channel);
+ if(default_selected && (strcmp(name, "NetdataWEL") == 0 || strcmp(name, "Netdata/Access") == 0))
+ // do not select Netdata Access logs by default
+ default_selected = false;
+
+ buffer_json_add_array_item_object(wb);
+ {
+ char size_for_humans[128];
+ size_snprintf(size_for_humans, sizeof(size_for_humans), s->size, "B", false);
+
+ char duration_for_humans[128];
+ duration_snprintf(duration_for_humans, sizeof(duration_for_humans),
+ (time_t)((s->last_ut - s->first_ut) / USEC_PER_SEC), "s", true);
+
+ char entries_for_humans[128];
+ entries_snprintf(entries_for_humans, sizeof(entries_for_humans), s->entries, "", false);
+
+ char info[1024];
+ snprintfz(info, sizeof(info), "%zu channel%s, with a total size of %s, covering %s%s%s%s",
+ s->count, s->count > 1 ? "s":"", size_for_humans, duration_for_humans,
+ s->entries ? ", having " : "", s->entries ? entries_for_humans : "", s->entries ? " entries" : "");
+
+ buffer_json_member_add_string(wb, "id", name);
+ buffer_json_member_add_string(wb, "name", name);
+ buffer_json_member_add_string(wb, "pill", size_for_humans);
+ buffer_json_member_add_string(wb, "info", info);
+ buffer_json_member_add_boolean(wb, "default_selected", default_selected);
+ }
+ buffer_json_object_close(wb); // options object
+
+ return 1;
+}
+
+static bool wevt_source_merge_sizes(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value , void *data __maybe_unused) {
+ struct wevt_source *old_v = old_value;
+ const struct wevt_source *new_v = new_value;
+
+ old_v->count += new_v->count;
+ old_v->size += new_v->size;
+ old_v->entries += new_v->entries;
+
+ if(new_v->first_ut && new_v->first_ut < old_v->first_ut)
+ old_v->first_ut = new_v->first_ut;
+
+ if(new_v->last_ut && new_v->last_ut > old_v->last_ut)
+ old_v->last_ut = new_v->last_ut;
+
+ return false;
+}
+
+void wevt_sources_to_json_array(BUFFER *wb) {
+ DICTIONARY *dict = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_NAME_LINK_DONT_CLONE|DICT_OPTION_DONT_OVERWRITE_VALUE);
+ dictionary_register_conflict_callback(dict, wevt_source_merge_sizes, NULL);
+
+ struct wevt_source t = { 0 };
+
+ LOGS_QUERY_SOURCE *src;
+ dfe_start_read(wevt_sources, src) {
+ t.first_ut = src->msg_first_ut;
+ t.last_ut = src->msg_last_ut;
+ t.count = 1;
+ t.size = src->size;
+ t.entries = src->entries;
+
+ src->source_type |= WEVTS_ALL;
+ t.type = wevt_source_type_internal;
+ for(size_t i = 0; WEVT_SOURCE_TYPE_names[i].name ;i++) {
+ if(src->source_type & WEVT_SOURCE_TYPE_names[i].id)
+ dictionary_set(dict, WEVT_SOURCE_TYPE_names[i].name, &t, sizeof(t));
+ }
+
+ if(src->provider) {
+ t.type = wevt_source_type_provider;
+ dictionary_set(dict, string2str(src->provider), &t, sizeof(t));
+ }
+
+ if(src->source) {
+ t.type = wevt_source_type_channel;
+ dictionary_set(dict, string2str(src->source), &t, sizeof(t));
+ }
+ }
+    dfe_done(src);
+
+ dictionary_sorted_walkthrough_read(dict, wevt_source_to_json_array_cb, wb);
+}
+
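+// wrapper around EvtGetChannelConfigProperty() that grows the variant buffer
+// and retries when the API reports ERROR_INSUFFICIENT_BUFFER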
+static bool ndEvtGetChannelConfigProperty(EVT_HANDLE hChannelConfig, WEVT_VARIANT *pr, EVT_CHANNEL_CONFIG_PROPERTY_ID id) {
+ if (!EvtGetChannelConfigProperty(hChannelConfig, id, 0, pr->size, pr->data, &pr->used)) {
+ DWORD status = GetLastError();
+        if (ERROR_INSUFFICIENT_BUFFER == status) {
+            wevt_variant_resize(pr, pr->used);
+            if(!EvtGetChannelConfigProperty(hChannelConfig, id, 0, pr->size, pr->data, &pr->used)) {
+                pr->used = 0;
+                pr->count = 0;
+                return false;
+            }
+        }
+        else {
+            // the call failed for a reason other than a small buffer - do not return stale data
+            pr->used = 0;
+            pr->count = 0;
+            return false;
+        }
+ }
+
+ wevt_variant_count_from_used(pr);
+ return true;
+}
+
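+// open the channel configuration and translate its type, enablement, retention mode
+// and owning publisher into WEVT_SOURCE_TYPE flags; the provider name (UTF-8) is
+// returned via *provider, or NULL when it cannot be determined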
+WEVT_SOURCE_TYPE categorize_channel(const wchar_t *channel_path, const char **provider, WEVT_VARIANT *property) {
+ EVT_HANDLE hChannelConfig = NULL;
+ WEVT_SOURCE_TYPE result = WEVTS_ALL;
+
+ // Open the channel configuration
+ hChannelConfig = EvtOpenChannelConfig(NULL, channel_path, 0);
+ if (!hChannelConfig)
+ goto cleanup;
+
+    if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigType) &&
+ property->count &&
+ property->data[0].Type == EvtVarTypeUInt32) {
+ switch (property->data[0].UInt32Val) {
+ case EvtChannelTypeAdmin:
+ result |= WEVTS_ADMIN;
+ break;
+
+ case EvtChannelTypeOperational:
+ result |= WEVTS_OPERATIONAL;
+ break;
+
+ case EvtChannelTypeAnalytic:
+ result |= WEVTS_ANALYTIC;
+ break;
+
+ case EvtChannelTypeDebug:
+ result |= WEVTS_DEBUG;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigClassicEventlog) &&
+ property->count &&
+ property->data[0].Type == EvtVarTypeBoolean &&
+ property->data[0].BooleanVal)
+ result |= WEVTS_CLASSIC;
+
+ if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigOwningPublisher) &&
+ property->count &&
+ property->data[0].Type == EvtVarTypeString) {
+ *provider = provider2utf8(property->data[0].StringVal);
+ if(wcscasecmp(property->data[0].StringVal, L"Microsoft-Windows-EventCollector") == 0)
+ result |= WEVTS_FORWARDED;
+ }
+ else
+ *provider = NULL;
+
+ if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelConfigEnabled) &&
+ property->count &&
+ property->data[0].Type == EvtVarTypeBoolean) {
+ if(property->data[0].BooleanVal)
+ result |= WEVTS_ENABLED;
+ else
+ result |= WEVTS_DISABLED;
+ }
+
+ bool got_retention = false;
+ bool retained = false;
+ if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelLoggingConfigRetention) &&
+ property->count &&
+ property->data[0].Type == EvtVarTypeBoolean) {
+ got_retention = true;
+ retained = property->data[0].BooleanVal;
+ }
+
+ bool got_auto_backup = false;
+ bool auto_backup = false;
+ if(ndEvtGetChannelConfigProperty(hChannelConfig, property, EvtChannelLoggingConfigAutoBackup) &&
+ property->count &&
+ property->data[0].Type == EvtVarTypeBoolean) {
+ got_auto_backup = true;
+ auto_backup = property->data[0].BooleanVal;
+ }
+
+ if(got_retention && got_auto_backup) {
+ if(!retained) {
+ if(auto_backup)
+ result |= WEVTS_BACKUP_MODE;
+ else
+ result |= WEVTS_OVERWRITE_MODE;
+ }
+ else {
+ if(auto_backup)
+ result |= WEVTS_STOP_WHEN_FULL_MODE;
+ else
+ result |= WEVTS_RETAIN_AND_BACKUP_MODE;
+ }
+ }
+
+cleanup:
+ if (hChannelConfig)
+ EvtClose(hChannelConfig);
+
+ return result;
+}
+
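+// enumerate all event channels: refresh the retention of the ones we already know,
+// add the new ones, and remove the ones that have disappeared;
+// the trylock ensures only one thread scans at a time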
+void wevt_sources_scan(void) {
+ static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER;
+ LPWSTR channel = NULL;
+ EVT_HANDLE hChannelEnum = NULL;
+
+ if(spinlock_trylock(&spinlock)) {
+ const usec_t started_ut = now_monotonic_usec();
+
+ WEVT_VARIANT property = { 0 };
+ DWORD dwChannelBufferSize = 0;
+ DWORD dwChannelBufferUsed = 0;
+ DWORD status = ERROR_SUCCESS;
+
+ // Open a handle to enumerate the event channels
+ hChannelEnum = EvtOpenChannelEnum(NULL, 0);
+        if (!hChannelEnum) {
+            nd_log(NDLS_COLLECTORS, NDLP_ERR, "WINDOWS EVENTS: EvtOpenChannelEnum() failed with %" PRIu64 "\n",
+                   (uint64_t)GetLastError());
+            spinlock_unlock(&spinlock);
+            goto cleanup;
+        }
+
+ WEVT_LOG *log = wevt_openlog6(WEVT_QUERY_RETENTION);
+        if(!log) {
+            spinlock_unlock(&spinlock);
+            goto cleanup;
+        }
+
+ while (true) {
+ if (!EvtNextChannelPath(hChannelEnum, dwChannelBufferSize, channel, &dwChannelBufferUsed)) {
+ status = GetLastError();
+ if (status == ERROR_NO_MORE_ITEMS)
+ break; // No more channels
+ else if (status == ERROR_INSUFFICIENT_BUFFER) {
+ dwChannelBufferSize = dwChannelBufferUsed;
+ freez(channel);
+ channel = mallocz(dwChannelBufferSize * sizeof(WCHAR));
+ continue;
+ } else {
+ nd_log(NDLS_COLLECTORS, NDLP_ERR,
+ "WINDOWS EVENTS: EvtNextChannelPath() failed\n");
+ break;
+ }
+ }
+
+ EVT_RETENTION retention;
+ if(!wevt_channel_retention(log, channel, NULL, &retention))
+ continue;
+
+ LOGS_QUERY_SOURCE *found = dictionary_get(wevt_sources, channel2utf8(channel));
+ if(found) {
+ // we just need to update its retention
+
+ found->last_scan_monotonic_ut = now_monotonic_usec();
+ found->msg_first_id = retention.first_event.id;
+ found->msg_last_id = retention.last_event.id;
+ found->msg_first_ut = retention.first_event.created_ns / NSEC_PER_USEC;
+ found->msg_last_ut = retention.last_event.created_ns / NSEC_PER_USEC;
+ found->size = retention.size_bytes;
+ continue;
+ }
+
+ const char *name = channel2utf8(channel);
+ const char *fullname = strdupz(name);
+            const char *provider = NULL;
+
+ WEVT_SOURCE_TYPE sources = categorize_channel(channel, &provider, &property);
+ char *slash = strchr(name, '/');
+ if(slash) *slash = '\0';
+
+ if(strcasecmp(name, "Application") == 0)
+ sources |= WEVTS_WINDOWS;
+ if(strcasecmp(name, "Security") == 0)
+ sources |= WEVTS_WINDOWS;
+ if(strcasecmp(name, "Setup") == 0)
+ sources |= WEVTS_WINDOWS;
+ if(strcasecmp(name, "System") == 0)
+ sources |= WEVTS_WINDOWS;
+
+ LOGS_QUERY_SOURCE src = {
+ .entries = retention.entries,
+ .fullname = fullname,
+ .fullname_len = strlen(fullname),
+ .last_scan_monotonic_ut = now_monotonic_usec(),
+ .msg_first_id = retention.first_event.id,
+ .msg_last_id = retention.last_event.id,
+ .msg_first_ut = retention.first_event.created_ns / NSEC_PER_USEC,
+ .msg_last_ut = retention.last_event.created_ns / NSEC_PER_USEC,
+ .size = retention.size_bytes,
+ .source_type = sources,
+ .source = string_strdupz(fullname),
+ };
+
+ if(strncmp(fullname, "Netdata", 7) == 0)
+ // WEL based providers of Netdata are named NetdataX
+ provider = "Netdata";
+
+ if(provider && *provider) {
+ char buf[sizeof(WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX) + strlen(provider)]; // sizeof() includes terminator
+ snprintf(buf, sizeof(buf), WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX "%s", provider);
+
+ if(trim_all(buf) != NULL) {
+ for (size_t i = 0; i < sizeof(buf) - 1; i++) {
+ // remove character that may interfere with our parsing
+ if (isspace((uint8_t) buf[i]) || buf[i] == '%' || buf[i] == '+' || buf[i] == '|' || buf[i] == ':')
+ buf[i] = '_';
+ }
+ src.provider = string_strdupz(buf);
+ }
+ }
+
+ dictionary_set(wevt_sources, src.fullname, &src, sizeof(src));
+ }
+
+// // add custom queries
+// for(size_t i = 0; i < sizeof(custom_queries) / sizeof(custom_queries[0]) ;i++) {
+// EVT_RETENTION retention;
+// if(!wevt_channel_retention(log, NULL, custom_queries[i].query, &retention))
+// continue;
+//
+// LOGS_QUERY_SOURCE src = {
+// .entries = 0,
+// .fullname = strdupz(custom_queries[i].name),
+// .fullname_len = strlen(custom_queries[i].name),
+// .last_scan_monotonic_ut = now_monotonic_usec(),
+// .msg_first_id = retention.first_event.id,
+// .msg_last_id = retention.last_event.id,
+// .msg_first_ut = retention.first_event.created_ns / NSEC_PER_USEC,
+// .msg_last_ut = retention.last_event.created_ns / NSEC_PER_USEC,
+// .size = retention.size_bytes,
+// .source_type = WEVTS_ALL,
+// .source = string_strdupz(custom_queries[i].name),
+// };
+//
+// dictionary_set(wevt_sources, src.fullname, &src, sizeof(src));
+// }
+//
+ wevt_closelog6(log);
+
+ LOGS_QUERY_SOURCE *src;
+ dfe_start_write(wevt_sources, src)
+ {
+ if(src->last_scan_monotonic_ut < started_ut) {
+ src->msg_first_id = 0;
+ src->msg_last_id = 0;
+ src->msg_first_ut = 0;
+ src->msg_last_ut = 0;
+ src->size = 0;
+ dictionary_del(wevt_sources, src->fullname);
+ }
+ }
+ dfe_done(src);
+ dictionary_garbage_collect(wevt_sources);
+
+ spinlock_unlock(&spinlock);
+
+ wevt_variant_cleanup(&property);
+ }
+
+cleanup:
+ freez(channel);
+    if (hChannelEnum)
+        EvtClose(hChannelEnum);
+}
diff --git a/src/collectors/windows-events.plugin/windows-events-sources.h b/src/collectors/windows-events.plugin/windows-events-sources.h
new file mode 100644
index 000000000..4ad4880d7
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-sources.h
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WINDOWS_EVENTS_SOURCES_H
+#define NETDATA_WINDOWS_EVENTS_SOURCES_H
+
+#include "libnetdata/libnetdata.h"
+
+typedef enum {
+ WEVTS_NONE = 0,
+ WEVTS_ALL = (1 << 0),
+ WEVTS_ADMIN = (1 << 1),
+ WEVTS_OPERATIONAL = (1 << 2),
+ WEVTS_ANALYTIC = (1 << 3),
+ WEVTS_DEBUG = (1 << 4),
+ WEVTS_WINDOWS = (1 << 5),
+ WEVTS_ENABLED = (1 << 6),
+ WEVTS_DISABLED = (1 << 7),
+ WEVTS_FORWARDED = (1 << 8),
+ WEVTS_CLASSIC = (1 << 9),
+ WEVTS_BACKUP_MODE = (1 << 10),
+ WEVTS_OVERWRITE_MODE = (1 << 11),
+ WEVTS_STOP_WHEN_FULL_MODE = (1 << 12),
+ WEVTS_RETAIN_AND_BACKUP_MODE = (1 << 13),
+} WEVT_SOURCE_TYPE;
+
+BITMAP_STR_DEFINE_FUNCTIONS_EXTERN(WEVT_SOURCE_TYPE)
+
+#define WEVT_SOURCE_ALL_NAME "All"
+#define WEVT_SOURCE_ALL_ADMIN_NAME "All-Admin"
+#define WEVT_SOURCE_ALL_OPERATIONAL_NAME "All-Operational"
+#define WEVT_SOURCE_ALL_ANALYTIC_NAME "All-Analytic"
+#define WEVT_SOURCE_ALL_DEBUG_NAME "All-Debug"
+#define WEVT_SOURCE_ALL_WINDOWS_NAME "All-Windows"
+#define WEVT_SOURCE_ALL_ENABLED_NAME "All-Enabled"
+#define WEVT_SOURCE_ALL_DISABLED_NAME "All-Disabled"
+#define WEVT_SOURCE_ALL_FORWARDED_NAME "All-Forwarded"
+#define WEVT_SOURCE_ALL_CLASSIC_NAME "All-Classic"
+#define WEVT_SOURCE_ALL_BACKUP_MODE_NAME "All-In-Backup-Mode"
+#define WEVT_SOURCE_ALL_OVERWRITE_MODE_NAME "All-In-Overwrite-Mode"
+#define WEVT_SOURCE_ALL_STOP_WHEN_FULL_MODE_NAME "All-In-StopWhenFull-Mode"
+#define WEVT_SOURCE_ALL_RETAIN_AND_BACKUP_MODE_NAME "All-In-RetainAndBackup-Mode"
+
+#define WEVT_SOURCE_ALL_OF_PROVIDER_PREFIX "All-Of-"
+
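+// one entry of the wevt_sources dictionary - describes a single channel (or custom query) the plugin can serve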
+typedef struct {
+ const char *fullname;
+ size_t fullname_len;
+
+ const wchar_t *custom_query;
+
+ STRING *source;
+ STRING *provider;
+ WEVT_SOURCE_TYPE source_type;
+ usec_t msg_first_ut;
+ usec_t msg_last_ut;
+ size_t size;
+
+ usec_t last_scan_monotonic_ut;
+
+ uint64_t msg_first_id;
+ uint64_t msg_last_id;
+ uint64_t entries;
+} LOGS_QUERY_SOURCE;
+
+extern DICTIONARY *wevt_sources;
+extern DICTIONARY *used_hashes_registry;
+
+void wevt_sources_init(void);
+void wevt_sources_scan(void);
+void buffer_json_wevt_versions(BUFFER *wb);
+
+void wevt_sources_to_json_array(BUFFER *wb);
+WEVT_SOURCE_TYPE wevt_internal_source_type(const char *value);
+
+int wevt_sources_dict_items_backward_compar(const void *a, const void *b);
+int wevt_sources_dict_items_forward_compar(const void *a, const void *b);
+
+#endif //NETDATA_WINDOWS_EVENTS_SOURCES_H
diff --git a/src/collectors/windows-events.plugin/windows-events-unicode.c b/src/collectors/windows-events.plugin/windows-events-unicode.c
new file mode 100644
index 000000000..81da31107
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-unicode.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows-events-unicode.h"
+
+inline void utf82unicode(wchar_t *dst, size_t dst_size, const char *src) {
+ if (src) {
+ // Convert from UTF-8 to wide char (UTF-16)
+ if (utf8_to_utf16(dst, dst_size, src, -1) == 0)
+ wcsncpy(dst, L"[failed conv.]", dst_size - 1);
+ }
+ else
+ wcsncpy(dst, L"[null]", dst_size - 1);
+}
+
+inline void unicode2utf8(char *dst, size_t dst_size, const wchar_t *src) {
+ if (src) {
+ if(WideCharToMultiByte(CP_UTF8, 0, src, -1, dst, (int)dst_size, NULL, NULL) == 0)
+ strncpyz(dst, "[failed conv.]", dst_size - 1);
+ }
+ else
+ strncpyz(dst, "[null]", dst_size - 1);
+}
+
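+// the converters below return pointers to per-thread static buffers;
+// each returned string is valid only until the next call on the same thread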
+wchar_t *channel2unicode(const char *utf8str) {
+ static __thread wchar_t buffer[1024];
+ utf82unicode(buffer, _countof(buffer), utf8str);
+ return buffer;
+}
+
+char *channel2utf8(const wchar_t *channel) {
+ static __thread char buffer[1024];
+ unicode2utf8(buffer, sizeof(buffer), channel);
+ return buffer;
+}
+
+char *query2utf8(const wchar_t *query) {
+ static __thread char buffer[16384];
+ unicode2utf8(buffer, sizeof(buffer), query);
+ return buffer;
+}
+
+char *provider2utf8(const wchar_t *provider) {
+ static __thread char buffer[256];
+ unicode2utf8(buffer, sizeof(buffer), provider);
+ return buffer;
+}
diff --git a/src/collectors/windows-events.plugin/windows-events-unicode.h b/src/collectors/windows-events.plugin/windows-events-unicode.h
new file mode 100644
index 000000000..e932bb5df
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-unicode.h
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WINDOWS_EVENTS_UNICODE_H
+#define NETDATA_WINDOWS_EVENTS_UNICODE_H
+
+#include "libnetdata/libnetdata.h"
+
+#define WINEVENT_NAME_KEYWORDS_SEPARATOR ", "
+static inline void txt_utf8_add_keywords_separator_if_needed(TXT_UTF8 *dst) {
+ if(dst->used > 1)
+ txt_utf8_append(dst, WINEVENT_NAME_KEYWORDS_SEPARATOR, sizeof(WINEVENT_NAME_KEYWORDS_SEPARATOR) - 1);
+}
+
+static inline void txt_utf8_set_numeric_if_empty(TXT_UTF8 *dst, const char *prefix, size_t len, uint64_t value) {
+ if(dst->used <= 1) {
+ txt_utf8_resize(dst, len + UINT64_MAX_LENGTH + 1, false);
+ memcpy(dst->data, prefix, len);
+ dst->used = len + print_uint64(&dst->data[len], value) + 1;
+ }
+}
+
+static inline void txt_utf8_set_hex_if_empty(TXT_UTF8 *dst, const char *prefix, size_t len, uint64_t value) {
+ if(dst->used <= 1) {
+ txt_utf8_resize(dst, len + UINT64_HEX_MAX_LENGTH + 1, false);
+ memcpy(dst->data, prefix, len);
+ dst->used = len + print_uint64_hex_full(&dst->data[len], value) + 1;
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// conversions
+
+void unicode2utf8(char *dst, size_t dst_size, const wchar_t *src);
+void utf82unicode(wchar_t *dst, size_t dst_size, const char *src);
+
+char *channel2utf8(const wchar_t *channel);
+wchar_t *channel2unicode(const char *utf8str);
+
+char *query2utf8(const wchar_t *query);
+char *provider2utf8(const wchar_t *provider);
+
+#endif //NETDATA_WINDOWS_EVENTS_UNICODE_H
diff --git a/src/collectors/windows-events.plugin/windows-events-xml.c b/src/collectors/windows-events.plugin/windows-events-xml.c
new file mode 100644
index 000000000..931ea6c54
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-xml.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows-events-xml.h"
+
+#include <string.h>
+#include <stdio.h>
+
+#define INDENT_STEP 2
+#define A_LOT_OF_SPACES " "
+
+// Helper: Add indentation
+static inline void buffer_add_xml_indent(BUFFER *buffer, const int level) {
+ size_t total_spaces = (size_t)level * INDENT_STEP;
+ const size_t step = sizeof(A_LOT_OF_SPACES) - 1;
+ while (total_spaces > 0) {
+ const size_t spaces_to_add = (total_spaces > step) ? step : total_spaces;
+ buffer_fast_strcat(buffer, A_LOT_OF_SPACES, spaces_to_add);
+ total_spaces -= spaces_to_add;
+ }
+}
+
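+// Helper: copy whatever is left verbatim and stop parsing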
+const char *append_the_rest(BUFFER *buffer, const char *xml, const char *end) {
+ if(xml >= end) return end;
+ buffer_fast_strcat(buffer, xml, end - xml);
+ return end;
+}
+
+static const char *parse_node(BUFFER *buffer, const char *xml, const char *end, int level);
+
+// Helper: Parse the value (between > and <) and return the next position to parse
+const char *parse_value_and_closing_tag(BUFFER *buffer, const char *xml, const char *end, int level) {
+ const char *start = xml;
+ bool has_subnodes = false;
+
+ // const char *tag_start = NULL, *tag_end = NULL;
+ while (xml < end) {
+ if(*xml == '<') {
+ if(xml + 1 < end && *(xml + 1) == '/') {
+ // a closing tag
+ xml += 2;
+
+// tag_start = xml;
+
+ while(xml < end && *xml != '>')
+ xml++;
+
+// tag_end = xml;
+
+ if(xml < end && *xml == '>')
+ xml++;
+
+ if(has_subnodes) {
+ buffer_putc(buffer, '\n');
+ buffer_add_xml_indent(buffer, level);
+ }
+
+ buffer_fast_strcat(buffer, start, xml - start);
+ return xml;
+ }
+ else {
+ // an opening tag
+ buffer_fast_strcat(buffer, start, xml - start);
+ xml = start = parse_node(buffer, xml, end, level + 1);
+ while(xml < end && isspace((uint8_t)*xml))
+ xml++;
+ has_subnodes = true;
+ }
+ }
+ else
+ xml++;
+ }
+
+ return append_the_rest(buffer, start, end);
+}
+
+// Parse a field value and return the next position to parse
+const char *parse_field_value(BUFFER *buffer, const char *xml, const char *end) {
+ const char quote = *xml;
+
+ if(quote != '"' && quote != '\'')
+ return append_the_rest(buffer, xml, end);
+
+ const char *start = xml++;
+
+ while (xml < end && *xml != quote) {
+ if (*xml == '\\') {
+ xml++; // Skip escape character
+
+ if(xml < end)
+ xml++;
+
+ continue;
+ }
+
+ xml++;
+ }
+
+ if(xml < end && *xml == quote) {
+ xml++; // Move past the closing quote
+ buffer_fast_strcat(buffer, start, xml - start);
+ return xml;
+ }
+
+ return append_the_rest(buffer, start, end);
+}
+
+// Parse a field name and return the next position to parse
+const char *parse_field(BUFFER *buffer, const char *xml, const char *end) {
+    while(xml < end && isspace((uint8_t)*xml)) xml++;
+
+ const char *start = xml;
+
+    while (xml < end && *xml != '=')
+ xml++;
+
+ // Append the field name
+ buffer_fast_strcat(buffer, start, xml - start);
+
+ if(xml < end && *xml == '=') {
+ xml++;
+
+ buffer_putc(buffer, '=');
+
+ if(xml < end && (*xml == '"' || *xml == '\''))
+ xml = parse_field_value(buffer, xml, end);
+
+ return xml; // Return the next character to parse
+ }
+
+ return append_the_rest(buffer, start, end);
+}
+
+// Parse a node (handles fields and subnodes) and return the next position to parse
+static inline const char *parse_node(BUFFER *buffer, const char *xml, const char *end, int level) {
+ if(*xml != '<')
+ return append_the_rest(buffer, xml, end);
+
+ const char *start = xml++; // skip the <
+
+ buffer_putc(buffer, '\n');
+ buffer_add_xml_indent(buffer, level);
+
+ // skip spaces before the tag name
+ while(xml < end && isspace((uint8_t)*xml)) xml++;
+
+ // Parse the tag name
+// const char *tag_start = xml, *tag_end = NULL;
+ while (xml < end && *xml != '>' && *xml != '/') {
+ xml++;
+
+ if(xml < end && isspace((uint8_t)*xml)) {
+ xml++;
+// tag_end = xml;
+
+ while(xml < end && isspace((uint8_t)*xml))
+ xml++;
+
+ if(xml < end && *xml == '/') {
+ // an opening tag that is self-closing
+ xml++;
+ if(xml < end && *xml == '>') {
+ xml++;
+ buffer_fast_strcat(buffer, start, xml - start);
+ return xml;
+ }
+ else
+ return append_the_rest(buffer, start, end);
+ }
+ else if(xml < end && *xml == '>') {
+ // the end of an opening tag
+ xml++;
+ buffer_fast_strcat(buffer, start, xml - start);
+ return parse_value_and_closing_tag(buffer, xml, end, level);
+ }
+ else {
+ buffer_fast_strcat(buffer, start, xml - start);
+ xml = start = parse_field(buffer, xml, end);
+ while(xml < end && isspace((uint8_t)*xml))
+ xml++;
+ }
+ }
+ }
+
+ bool self_closing_tag = false;
+ if(xml < end && *xml == '/') {
+ self_closing_tag = true;
+ xml++;
+ }
+
+ if(xml < end && *xml == '>') {
+ xml++;
+ buffer_fast_strcat(buffer, start, xml - start);
+
+ if(self_closing_tag)
+ return xml;
+
+ return parse_value_and_closing_tag(buffer, xml, end, level);
+ }
+
+ return append_the_rest(buffer, start, end);
+}
+
+static inline void buffer_pretty_print_xml_object(BUFFER *buffer, const char *xml, const char *end) {
+ while(xml < end) {
+ while(xml < end && isspace((uint8_t)*xml))
+ xml++;
+
+ if(xml < end && *xml == '<')
+ xml = parse_node(buffer, xml, end, 1);
+ else {
+ append_the_rest(buffer, xml, end);
+ return;
+ }
+ }
+}
+
+void buffer_pretty_print_xml(BUFFER *buffer, const char *xml, size_t xml_len) {
+ const char *end = xml + xml_len;
+ buffer_pretty_print_xml_object(buffer, xml, end);
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+
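+// walk the keys[] path of nested tags; when the whole path is found, call cb()
+// with the content between the innermost opening and closing tags;
+// returns false when any tag of the path is missing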
+bool buffer_extract_and_print_xml_with_cb(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[],
+ void (*cb)(BUFFER *, const char *, const char *, const char *)) {
+    if(!keys || !keys[0] || !*keys[0]) {
+ buffer_pretty_print_xml(buffer, xml, xml_len);
+ return true;
+ }
+
+ const char *start = xml, *end = NULL;
+ for(size_t k = 0; keys[k] ; k++) {
+ if(!*keys[k]) continue;
+
+ size_t klen = strlen(keys[k]);
+ char tag_open[klen + 2];
+ tag_open[0] = '<';
+ strcpy(&tag_open[1], keys[k]);
+ tag_open[klen + 1] = '\0';
+
+ const char *new_start = strstr(start, tag_open);
+ if(!new_start)
+ return false;
+
+ start = new_start + klen + 1;
+
+ if(*start != '>' && !isspace((uint8_t)*start))
+ return false;
+
+ if(*start != '>') {
+ start = strchr(start, '>');
+ if(!start) return false;
+ }
+ start++; // skip the >
+
+ char tag_close[klen + 4];
+ tag_close[0] = '<';
+ tag_close[1] = '/';
+ strcpy(&tag_close[2], keys[k]);
+ tag_close[klen + 2] = '>';
+ tag_close[klen + 3] = '\0';
+
+ const char *new_end = strstr(start, tag_close);
+ if(!new_end || (end && new_end > end))
+ return false;
+
+ end = new_end;
+ }
+
+ if(!start || !end || start == end)
+ return false;
+
+ cb(buffer, prefix, start, end);
+ return true;
+}
+
+static void print_xml_cb(BUFFER *buffer, const char *prefix, const char *start, const char *end) {
+ if(prefix)
+ buffer_strcat(buffer, prefix);
+
+ buffer_pretty_print_xml_object(buffer, start, end);
+}
+
+bool buffer_extract_and_print_xml(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[]) {
+ return buffer_extract_and_print_xml_with_cb(
+ buffer, xml, xml_len,
+ prefix, keys,
+ print_xml_cb);
+}
+
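+// copy the extracted value while decoding the XML character references the
+// event log renders: &#10; (newline), &#13; (dropped), &#9; (tab), &lt; and &gt;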
+static void print_value_cb(BUFFER *buffer, const char *prefix, const char *start, const char *end) {
+ if(prefix)
+ buffer_strcat(buffer, prefix);
+
+ buffer_need_bytes(buffer, end - start + 1);
+
+ char *started = &buffer->buffer[buffer->len];
+ char *d = started;
+ const char *s = start;
+
+    while(s < end) {
+ if(*s == '&' && s + 3 < end) {
+ if(*(s + 1) == '#') {
+ if(s + 4 < end && *(s + 2) == '1' && *(s + 4) == ';') {
+ if (*(s + 3) == '0') {
+ s += 5;
+ *d++ = '\n';
+ continue;
+ } else if (*(s + 3) == '3') {
+ s += 5;
+ // *d++ = '\r';
+ continue;
+ }
+ } else if (*(s + 2) == '9' && *(s + 3) == ';') {
+ s += 4;
+ *d++ = '\t';
+ continue;
+ }
+ }
+ else if(s + 3 < end && *(s + 2) == 't' && *(s + 3) == ';') {
+ if(*(s + 1) == 'l') {
+ s += 4;
+ *d++ = '<';
+ continue;
+ }
+ else if(*(s + 1) == 'g') {
+ s += 4;
+ *d++ = '>';
+ continue;
+ }
+ }
+ }
+ *d++ = *s++;
+ }
+ *d = '\0';
+ buffer->len += d - started;
+}
+
+bool buffer_xml_extract_and_print_value(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[]) {
+ return buffer_extract_and_print_xml_with_cb(
+ buffer, xml, xml_len,
+ prefix, keys,
+    dfe_done(src);
+}
diff --git a/src/collectors/windows-events.plugin/windows-events-xml.h b/src/collectors/windows-events.plugin/windows-events-xml.h
new file mode 100644
index 000000000..78d2f686e
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events-xml.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef WINDOWS_EVENTS_XML_H
+#define WINDOWS_EVENTS_XML_H
+
+#include "libnetdata/libnetdata.h"
+
+void buffer_pretty_print_xml(BUFFER *buffer, const char *xml, size_t xml_len);
+bool buffer_extract_and_print_xml(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[]);
+bool buffer_xml_extract_and_print_value(BUFFER *buffer, const char *xml, size_t xml_len, const char *prefix, const char *keys[]);
+
+#endif //WINDOWS_EVENTS_XML_H
diff --git a/src/collectors/windows-events.plugin/windows-events.c b/src/collectors/windows-events.plugin/windows-events.c
new file mode 100644
index 000000000..09ce558ae
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events.c
@@ -0,0 +1,1402 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "libnetdata/libnetdata.h"
+#include "libnetdata/required_dummies.h"
+
+#include "windows-events.h"
+
+netdata_mutex_t stdout_mutex = NETDATA_MUTEX_INITIALIZER;
+static bool plugin_should_exit = false;
+
+#define WEVT_ALWAYS_VISIBLE_KEYS NULL
+
+#define WEVT_KEYS_EXCLUDED_FROM_FACETS \
+ "|" WEVT_FIELD_MESSAGE \
+ "|" WEVT_FIELD_XML \
+ ""
+
+#define WEVT_KEYS_INCLUDED_IN_FACETS \
+ "|" WEVT_FIELD_COMPUTER \
+ "|" WEVT_FIELD_PROVIDER \
+ "|" WEVT_FIELD_LEVEL \
+ "|" WEVT_FIELD_KEYWORDS \
+ "|" WEVT_FIELD_OPCODE \
+ "|" WEVT_FIELD_TASK \
+ "|" WEVT_FIELD_ACCOUNT \
+ "|" WEVT_FIELD_DOMAIN \
+ "|" WEVT_FIELD_SID \
+ ""
+
+#define query_has_fts(lqs) ((lqs)->rq.query != NULL)
+
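+// honor cancellation and the query timeout between batches of rows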
+static inline WEVT_QUERY_STATUS check_stop(const bool *cancelled, const usec_t *stop_monotonic_ut) {
+ if(cancelled && __atomic_load_n(cancelled, __ATOMIC_RELAXED)) {
+ nd_log(NDLS_COLLECTORS, NDLP_INFO, "Function has been cancelled");
+ return WEVT_CANCELLED;
+ }
+
+ if(now_monotonic_usec() > __atomic_load_n(stop_monotonic_ut, __ATOMIC_RELAXED)) {
+ internal_error(true, "Function timed out");
+ return WEVT_TIMED_OUT;
+ }
+
+ return WEVT_OK;
+}
+
+FACET_ROW_SEVERITY wevt_levelid_to_facet_severity(FACETS *facets __maybe_unused, FACET_ROW *row, void *data __maybe_unused) {
+ FACET_ROW_KEY_VALUE *levelid_rkv = dictionary_get(row->dict, WEVT_FIELD_LEVEL "ID");
+ if(!levelid_rkv || levelid_rkv->empty)
+ return FACET_ROW_SEVERITY_NORMAL;
+
+ int windows_event_level = str2i(buffer_tostring(levelid_rkv->wb));
+
+ switch (windows_event_level) {
+ case WEVT_LEVEL_VERBOSE:
+ return FACET_ROW_SEVERITY_DEBUG;
+
+ default:
+ case WEVT_LEVEL_INFORMATION:
+ return FACET_ROW_SEVERITY_NORMAL;
+
+ case WEVT_LEVEL_WARNING:
+ return FACET_ROW_SEVERITY_WARNING;
+
+ case WEVT_LEVEL_ERROR:
+ case WEVT_LEVEL_CRITICAL:
+ return FACET_ROW_SEVERITY_CRITICAL;
+ }
+}
+
+struct wevt_bin_data {
+ bool rendered;
+ WEVT_EVENT ev;
+ WEVT_LOG *log;
+ EVT_HANDLE hEvent;
+ PROVIDER_META_HANDLE *provider;
+};
+
+static void wevt_cleanup_bin_data(void *data) {
+ struct wevt_bin_data *d = data;
+
+ if(d->hEvent)
+ EvtClose(d->hEvent);
+
+ provider_release(d->provider);
+ freez(d);
+}
+
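+// attach the event handle and its provider to the facets row, so that the message
+// and the XML can be rendered lazily, only for the rows included in the response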
+static inline void wevt_facets_register_bin_data(WEVT_LOG *log, FACETS *facets, WEVT_EVENT *ev) {
+ struct wevt_bin_data *d = mallocz(sizeof(struct wevt_bin_data));
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ internal_fatal(strcmp(log->ops.provider.data, provider_get_name(log->provider)) != 0,
+ "Provider name mismatch in data!");
+
+ internal_fatal(!UUIDeq(ev->provider, provider_get_uuid(log->provider)),
+ "Provider UUID mismatch in data!");
+#endif
+
+ d->ev = *ev;
+ d->log = log;
+ d->rendered = false;
+
+    // take ownership of the event handle (released later in wevt_cleanup_bin_data)
+ d->hEvent = log->hEvent; log->hEvent = NULL;
+
+ // dup the provider
+ d->provider = provider_dup(log->provider);
+
+ facets_row_bin_data_set(facets, wevt_cleanup_bin_data, d);
+}
+
+static void wevt_lazy_loading_event_and_xml(struct wevt_bin_data *d, FACET_ROW *row __maybe_unused) {
+ if(d->rendered) return;
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ const FACET_ROW_KEY_VALUE *provider_rkv = dictionary_get(row->dict, WEVT_FIELD_PROVIDER);
+ internal_fatal(!provider_rkv || strcmp(buffer_tostring(provider_rkv->wb), provider_get_name(d->provider)) != 0,
+ "Provider of row does not match the bin data associated with it");
+
+ uint64_t event_record_id = UINT64_MAX;
+ const FACET_ROW_KEY_VALUE *event_record_id_rkv = dictionary_get(row->dict, WEVT_FIELD_EVENTRECORDID);
+ if(event_record_id_rkv)
+ event_record_id = str2uint64_t(buffer_tostring(event_record_id_rkv->wb), NULL);
+ internal_fatal(event_record_id != d->ev.id,
+ "Event Record ID of row does not match the bin data associated with it");
+#endif
+
+ // the message needs the xml
+ EvtFormatMessage_Xml_utf8(&d->log->ops.unicode, d->provider, d->hEvent, &d->log->ops.xml);
+ EvtFormatMessage_Event_utf8(&d->log->ops.unicode, d->provider, d->hEvent, &d->log->ops.event);
+ d->rendered = true;
+}
+
+static void wevt_lazy_load_xml(
+ FACETS *facets,
+ BUFFER *json_array,
+ FACET_ROW_KEY_VALUE *rkv __maybe_unused,
+ FACET_ROW *row,
+ void *data __maybe_unused) {
+
+ struct wevt_bin_data *d = facets_row_bin_data_get(facets, row);
+ if(!d) {
+ buffer_json_add_array_item_string(json_array, "Failed to get row BIN DATA from facets");
+ return;
+ }
+
+ wevt_lazy_loading_event_and_xml(d, row);
+ buffer_json_add_array_item_string(json_array, d->log->ops.xml.data);
+}
+
+static void wevt_lazy_load_message(
+ FACETS *facets,
+ BUFFER *json_array,
+ FACET_ROW_KEY_VALUE *rkv __maybe_unused,
+ FACET_ROW *row,
+ void *data __maybe_unused) {
+
+ struct wevt_bin_data *d = facets_row_bin_data_get(facets, row);
+ if(!d) {
+ buffer_json_add_array_item_string(json_array, "Failed to get row BIN DATA from facets");
+ return;
+ }
+
+ wevt_lazy_loading_event_and_xml(d, row);
+
+ if(d->log->ops.event.used <= 1) {
+ TXT_UTF8 *xml = &d->log->ops.xml;
+
+ buffer_flush(rkv->wb);
+
+ bool added_message = false;
+ if(xml->used > 1) {
+ const char *message_path[] = {
+ "RenderingInfo",
+ "Message",
+ NULL};
+
+ added_message = buffer_xml_extract_and_print_value(
+ rkv->wb,
+ xml->data, xml->used - 1,
+ NULL,
+ message_path);
+ }
+
+ if(!added_message) {
+ const FACET_ROW_KEY_VALUE *event_id_rkv = dictionary_get(row->dict, WEVT_FIELD_EVENTID);
+ if (event_id_rkv && buffer_strlen(event_id_rkv->wb)) {
+ buffer_fast_strcat(rkv->wb, "Event ", 6);
+ buffer_fast_strcat(rkv->wb, buffer_tostring(event_id_rkv->wb), buffer_strlen(event_id_rkv->wb));
+ } else
+ buffer_strcat(rkv->wb, "Unknown Event ");
+
+ const FACET_ROW_KEY_VALUE *provider_rkv = dictionary_get(row->dict, WEVT_FIELD_PROVIDER);
+ if (provider_rkv && buffer_strlen(provider_rkv->wb)) {
+ buffer_fast_strcat(rkv->wb, " of ", 4);
+ buffer_fast_strcat(rkv->wb, buffer_tostring(provider_rkv->wb), buffer_strlen(provider_rkv->wb));
+ buffer_putc(rkv->wb, '.');
+ } else
+ buffer_strcat(rkv->wb, "of unknown Provider.");
+ }
+
+ if(xml->used > 1) {
+ const char *event_path[] = {
+ "EventData",
+ NULL
+ };
+ bool added_event_data = buffer_extract_and_print_xml(
+ rkv->wb,
+ xml->data, xml->used - 1,
+ "\n\nRelated event data:\n",
+ event_path);
+
+ const char *user_path[] = {
+ "UserData",
+ NULL
+ };
+ bool added_user_data = buffer_extract_and_print_xml(
+ rkv->wb,
+ xml->data, xml->used - 1,
+ "\n\nRelated user data:\n",
+ user_path);
+
+ if(!added_event_data && !added_user_data)
+ buffer_strcat(rkv->wb, " Without any related data.");
+ }
+
+ buffer_json_add_array_item_string(json_array, buffer_tostring(rkv->wb));
+ }
+ else
+ buffer_json_add_array_item_string(json_array, d->log->ops.event.data);
+}
+
+static void wevt_register_fields(LOGS_QUERY_STATUS *lqs) {
+ // the order of the fields here, controls the order of the fields at the table presented
+
+ FACETS *facets = lqs->facets;
+ LOGS_QUERY_REQUEST *rq = &lqs->rq;
+
+ facets_register_row_severity(facets, wevt_levelid_to_facet_severity, NULL);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_COMPUTER,
+ rq->default_facet | FACET_KEY_OPTION_VISIBLE);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_CHANNEL,
+ rq->default_facet | FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_PROVIDER,
+ rq->default_facet | FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_ACCOUNT,
+ rq->default_facet | FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_DOMAIN,
+ rq->default_facet | FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_SID,
+ rq->default_facet | FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_EVENTID,
+ rq->default_facet |
+ FACET_KEY_OPTION_VISIBLE | FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_EVENTS_API,
+ rq->default_facet |
+ FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_LEVEL,
+ rq->default_facet | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_EXPANDED_FILTER);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_LEVEL "ID",
+ FACET_KEY_OPTION_NONE);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_PROCESSID,
+ FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_THREADID,
+ FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_TASK,
+ rq->default_facet | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_VISIBLE);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_TASK "ID",
+ FACET_KEY_OPTION_NONE);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_OPCODE,
+ rq->default_facet | FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_VISIBLE);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_OPCODE "ID",
+ FACET_KEY_OPTION_NONE);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_KEYWORDS,
+ rq->default_facet | FACET_KEY_OPTION_FTS);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_KEYWORDS "ID",
+ FACET_KEY_OPTION_NONE);
+
+ facets_register_dynamic_key_name(
+ facets,
+ WEVT_FIELD_MESSAGE,
+ FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_MAIN_TEXT | FACET_KEY_OPTION_VISIBLE,
+ wevt_lazy_load_message,
+ NULL);
+
+ facets_register_dynamic_key_name(
+ facets,
+ WEVT_FIELD_XML,
+ FACET_KEY_OPTION_NEVER_FACET | FACET_KEY_OPTION_PRETTY_XML,
+ wevt_lazy_load_xml,
+ NULL);
+
+ if(query_has_fts(lqs)) {
+ facets_register_key_name(
+ facets, WEVT_FIELD_EVENT_MESSAGE_HIDDEN,
+ FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_HIDDEN | FACET_KEY_OPTION_NEVER_FACET);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_EVENT_XML_HIDDEN,
+ FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_HIDDEN | FACET_KEY_OPTION_NEVER_FACET);
+
+ facets_register_key_name(
+ facets, WEVT_FIELD_EVENT_DATA_HIDDEN,
+ FACET_KEY_OPTION_FTS | FACET_KEY_OPTION_HIDDEN | FACET_KEY_OPTION_NEVER_FACET);
+ }
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ facets_register_key_name(
+ facets, "z_level_source",
+ rq->default_facet);
+
+ facets_register_key_name(
+ facets, "z_keywords_source",
+ rq->default_facet);
+
+ facets_register_key_name(
+ facets, "z_opcode_source",
+ rq->default_facet);
+
+ facets_register_key_name(
+ facets, "z_task_source",
+ rq->default_facet);
+#endif
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+static const char *source_to_str(TXT_UTF8 *txt) {
+ switch(txt->src) {
+ default:
+ case TXT_SOURCE_UNKNOWN:
+ return "unknown";
+
+ case TXT_SOURCE_EVENT_LOG:
+ return "event-log";
+
+ case TXT_SOURCE_PROVIDER:
+ return "provider";
+
+ case TXT_SOURCE_FIELD_CACHE:
+ return "fields-cache";
+
+ case TXT_SOURCE_HARDCODED:
+ return "hardcoded";
+ }
+}
+#endif
+
+static const char *events_api_to_str(WEVT_PROVIDER_PLATFORM platform) {
+ switch(platform) {
+ case WEVT_PLATFORM_WEL:
+ return "Windows Event Log";
+
+ case WEVT_PLATFORM_ETW:
+ return "Event Tracing for Windows";
+
+ case WEVT_PLATFORM_TL:
+ return "TraceLogging";
+
+ default:
+ return "Unknown";
+ }
+}
+
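+// add all the fields of one event to facets; returns an approximate number of
+// bytes processed, used for the query statistics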
+static inline size_t wevt_process_event(WEVT_LOG *log, FACETS *facets, LOGS_QUERY_SOURCE *src, usec_t *msg_ut __maybe_unused, WEVT_EVENT *ev) {
+ static __thread char uuid_str[UUID_STR_LEN];
+
+ size_t len, bytes = log->ops.raw.system.used + log->ops.raw.user.used;
+
+ if(!UUIDiszero(ev->provider)) {
+ uuid_unparse_lower(ev->provider.uuid, uuid_str);
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_PROVIDER_GUID, sizeof(WEVT_FIELD_PROVIDER_GUID) - 1,
+ uuid_str, sizeof(uuid_str) - 1);
+ }
+
+ if(!UUIDiszero(ev->activity_id)) {
+ uuid_unparse_lower(ev->activity_id.uuid, uuid_str);
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_ACTIVITY_ID, sizeof(WEVT_FIELD_ACTIVITY_ID) - 1,
+ uuid_str, sizeof(uuid_str) - 1);
+ }
+
+ if(!UUIDiszero(ev->related_activity_id)) {
+ uuid_unparse_lower(ev->related_activity_id.uuid, uuid_str);
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_RELATED_ACTIVITY_ID, sizeof(WEVT_FIELD_RELATED_ACTIVITY_ID) - 1,
+ uuid_str, sizeof(uuid_str) - 1);
+ }
+
+ if(ev->qualifiers) {
+ static __thread char qualifiers[UINT64_HEX_MAX_LENGTH];
+ len = print_uint64_hex(qualifiers, ev->qualifiers);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_QUALIFIERS, sizeof(WEVT_FIELD_QUALIFIERS) - 1,
+ qualifiers, len);
+ }
+
+ {
+ static __thread char event_record_id_str[UINT64_MAX_LENGTH];
+ len = print_uint64(event_record_id_str, ev->id);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_EVENTRECORDID, sizeof(WEVT_FIELD_EVENTRECORDID) - 1,
+ event_record_id_str, len);
+ }
+
+ if(ev->version) {
+ static __thread char version[UINT64_MAX_LENGTH];
+ len = print_uint64(version, ev->version);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_VERSION, sizeof(WEVT_FIELD_VERSION) - 1,
+ version, len);
+ }
+
+ if(log->ops.provider.used > 1) {
+ bytes += log->ops.provider.used * 2; // unicode is double
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_PROVIDER, sizeof(WEVT_FIELD_PROVIDER) - 1,
+ log->ops.provider.data, log->ops.provider.used - 1);
+ }
+
+ if(log->ops.channel.used > 1) {
+ bytes += log->ops.channel.used * 2;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_CHANNEL, sizeof(WEVT_FIELD_CHANNEL) - 1,
+ log->ops.channel.data, log->ops.channel.used - 1);
+ }
+ else {
+ bytes += src->fullname_len * 2;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_CHANNEL, sizeof(WEVT_FIELD_CHANNEL) - 1,
+ src->fullname, src->fullname_len);
+ }
+
+ if(log->ops.level.used > 1) {
+ bytes += log->ops.level.used * 2;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_LEVEL, sizeof(WEVT_FIELD_LEVEL) - 1,
+ log->ops.level.data, log->ops.level.used - 1);
+ }
+
+ if(log->ops.computer.used > 1) {
+ bytes += log->ops.computer.used * 2;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_COMPUTER, sizeof(WEVT_FIELD_COMPUTER) - 1,
+ log->ops.computer.data, log->ops.computer.used - 1);
+ }
+
+ if(log->ops.opcode.used > 1) {
+ bytes += log->ops.opcode.used * 2;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_OPCODE, sizeof(WEVT_FIELD_OPCODE) - 1,
+ log->ops.opcode.data, log->ops.opcode.used - 1);
+ }
+
+ if(log->ops.keywords.used > 1) {
+ bytes += log->ops.keywords.used * 2;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_KEYWORDS, sizeof(WEVT_FIELD_KEYWORDS) - 1,
+ log->ops.keywords.data, log->ops.keywords.used - 1);
+ }
+
+ if(log->ops.task.used > 1) {
+ bytes += log->ops.task.used * 2;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_TASK, sizeof(WEVT_FIELD_TASK) - 1,
+ log->ops.task.data, log->ops.task.used - 1);
+ }
+
+ if(log->ops.account.used > 1) {
+ bytes += log->ops.account.used * 2;
+ facets_add_key_value_length(
+ facets,
+ WEVT_FIELD_ACCOUNT, sizeof(WEVT_FIELD_ACCOUNT) - 1,
+ log->ops.account.data, log->ops.account.used - 1);
+ }
+
+ if(log->ops.domain.used > 1) {
+ bytes += log->ops.domain.used * 2;
+ facets_add_key_value_length(
+ facets,
+ WEVT_FIELD_DOMAIN, sizeof(WEVT_FIELD_DOMAIN) - 1,
+ log->ops.domain.data, log->ops.domain.used - 1);
+ }
+
+ if(log->ops.sid.used > 1) {
+ bytes += log->ops.sid.used * 2;
+ facets_add_key_value_length(
+ facets,
+ WEVT_FIELD_SID, sizeof(WEVT_FIELD_SID) - 1,
+ log->ops.sid.data, log->ops.sid.used - 1);
+ }
+
+ {
+ static __thread char event_id_str[UINT64_MAX_LENGTH];
+ len = print_uint64(event_id_str, ev->event_id);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_EVENTID, sizeof(WEVT_FIELD_EVENTID) - 1,
+ event_id_str, len);
+ }
+
+ {
+ const char *s = events_api_to_str(ev->platform);
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_EVENTS_API, sizeof(WEVT_FIELD_EVENTS_API) - 1, s, strlen(s));
+ }
+
+ if(ev->process_id) {
+ static __thread char process_id_str[UINT64_MAX_LENGTH];
+ len = print_uint64(process_id_str, ev->process_id);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_PROCESSID, sizeof(WEVT_FIELD_PROCESSID) - 1,
+ process_id_str, len);
+ }
+
+ if(ev->thread_id) {
+ static __thread char thread_id_str[UINT64_MAX_LENGTH];
+ len = print_uint64(thread_id_str, ev->thread_id);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_THREADID, sizeof(WEVT_FIELD_THREADID) - 1,
+ thread_id_str, len);
+ }
+
+ {
+ static __thread char str[UINT64_MAX_LENGTH];
+ len = print_uint64(str, ev->level);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_LEVEL "ID", sizeof(WEVT_FIELD_LEVEL) + 2 - 1, str, len);
+ }
+
+ {
+ static __thread char str[UINT64_HEX_MAX_LENGTH];
+ len = print_uint64_hex_full(str, ev->keywords);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_KEYWORDS "ID", sizeof(WEVT_FIELD_KEYWORDS) + 2 - 1, str, len);
+ }
+
+ {
+ static __thread char str[UINT64_MAX_LENGTH];
+ len = print_uint64(str, ev->opcode);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_OPCODE "ID", sizeof(WEVT_FIELD_OPCODE) + 2 - 1, str, len);
+ }
+
+ {
+ static __thread char str[UINT64_MAX_LENGTH];
+ len = print_uint64(str, ev->task);
+ bytes += len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_TASK "ID", sizeof(WEVT_FIELD_TASK) + 2 - 1, str, len);
+ }
+
+ if(log->type & WEVT_QUERY_EVENT_DATA) {
+ // the query has full text-search
+ if(log->ops.event.used > 1) {
+ bytes += log->ops.event.used;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_EVENT_MESSAGE_HIDDEN, sizeof(WEVT_FIELD_EVENT_MESSAGE_HIDDEN) - 1,
+ log->ops.event.data, log->ops.event.used - 1);
+ }
+
+ if(log->ops.xml.used > 1) {
+ bytes += log->ops.xml.used;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_EVENT_XML_HIDDEN, sizeof(WEVT_FIELD_EVENT_XML_HIDDEN) - 1,
+ log->ops.xml.data, log->ops.xml.used - 1);
+ }
+
+ if(log->ops.event_data->len) {
+ bytes += log->ops.event_data->len;
+ facets_add_key_value_length(
+ facets, WEVT_FIELD_EVENT_DATA_HIDDEN, sizeof(WEVT_FIELD_EVENT_DATA_HIDDEN) - 1,
+ buffer_tostring(log->ops.event_data), buffer_strlen(log->ops.event_data));
+ }
+ }
+
+ wevt_facets_register_bin_data(log, facets, ev);
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ facets_add_key_value(facets, "z_level_source", source_to_str(&log->ops.level));
+ facets_add_key_value(facets, "z_keywords_source", source_to_str(&log->ops.keywords));
+ facets_add_key_value(facets, "z_opcode_source", source_to_str(&log->ops.opcode));
+ facets_add_key_value(facets, "z_task_source", source_to_str(&log->ops.task));
+#endif
+
+ return bytes;
+}
+
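+// keep the progress counters up to date and report progress to the caller,
+// at most once every WINDOWS_EVENTS_PROGRESS_EVERY_UT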
+static void send_progress_update(LOGS_QUERY_STATUS *lqs, size_t current_row_counter, bool flush_current_file) {
+ usec_t now_ut = now_monotonic_usec();
+
+ if(current_row_counter > lqs->c.progress.entries.current_query_total) {
+ lqs->c.progress.entries.total += current_row_counter - lqs->c.progress.entries.current_query_total;
+ lqs->c.progress.entries.current_query_total = current_row_counter;
+ }
+
+ if(flush_current_file) {
+ lqs->c.progress.entries.total += current_row_counter;
+ lqs->c.progress.entries.total -= lqs->c.progress.entries.current_query_total;
+ lqs->c.progress.entries.completed += current_row_counter;
+ lqs->c.progress.entries.current_query_total = 0;
+ }
+
+ size_t completed = lqs->c.progress.entries.completed + current_row_counter;
+ if(completed > lqs->c.progress.entries.total)
+ lqs->c.progress.entries.total = completed;
+
+ usec_t progress_duration_ut = now_ut - lqs->c.progress.last_ut;
+ if(progress_duration_ut >= WINDOWS_EVENTS_PROGRESS_EVERY_UT) {
+ lqs->c.progress.last_ut = now_ut;
+
+ netdata_mutex_lock(&stdout_mutex);
+ pluginsd_function_progress_to_stdout(lqs->rq.transaction, completed, lqs->c.progress.entries.total);
+ netdata_mutex_unlock(&stdout_mutex);
+ }
+}
+
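+// query one channel from the newest event to the oldest (EvtQueryReverseDirection)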
+static WEVT_QUERY_STATUS wevt_query_backward(
+ WEVT_LOG *log, BUFFER *wb __maybe_unused, FACETS *facets,
+ LOGS_QUERY_SOURCE *src,
+ LOGS_QUERY_STATUS *lqs)
+{
+ usec_t start_ut = lqs->query.start_ut;
+ usec_t stop_ut = lqs->query.stop_ut;
+ bool stop_when_full = lqs->query.stop_when_full;
+
+// lqs->c.query_file.start_ut = start_ut;
+// lqs->c.query_file.stop_ut = stop_ut;
+
+ if(!wevt_query(log, channel2unicode(src->fullname), lqs->c.query, EvtQueryReverseDirection))
+ return WEVT_FAILED_TO_SEEK;
+
+ size_t errors_no_timestamp = 0;
+ usec_t latest_msg_ut = 0; // the biggest timestamp we have seen so far
+ usec_t first_msg_ut = 0; // the first message we got from the db
+ size_t row_counter = 0, last_row_counter = 0, rows_useful = 0;
+ size_t bytes = 0, last_bytes = 0;
+
+ usec_t last_usec_from = 0;
+ usec_t last_usec_to = 0;
+
+ WEVT_QUERY_STATUS status = WEVT_OK;
+
+ facets_rows_begin(facets);
+ WEVT_EVENT e;
+ while (status == WEVT_OK && wevt_get_next_event(log, &e)) {
+ usec_t msg_ut = e.created_ns / NSEC_PER_USEC;
+
+ if(unlikely(!msg_ut)) {
+ errors_no_timestamp++;
+ continue;
+ }
+
+ if (unlikely(msg_ut > start_ut))
+ continue;
+
+ if (unlikely(msg_ut < stop_ut))
+ break;
+
+ if(unlikely(msg_ut > latest_msg_ut))
+ latest_msg_ut = msg_ut;
+
+ if(unlikely(!first_msg_ut)) {
+ first_msg_ut = msg_ut;
+ // lqs->c.query_file.first_msg_ut = msg_ut;
+ }
+
+// sampling_t sample = is_row_in_sample(log, lqs, src, msg_ut,
+// FACETS_ANCHOR_DIRECTION_BACKWARD,
+// facets_row_candidate_to_keep(facets, msg_ut));
+//
+// if(sample == SAMPLING_FULL) {
+ bytes += wevt_process_event(log, facets, src, &msg_ut, &e);
+
+ // make sure each line gets a unique timestamp
+ if(unlikely(msg_ut >= last_usec_from && msg_ut <= last_usec_to))
+ msg_ut = --last_usec_from;
+ else
+ last_usec_from = last_usec_to = msg_ut;
+
+ if(facets_row_finished(facets, msg_ut))
+ rows_useful++;
+
+ row_counter++;
+ if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 &&
+ stop_when_full &&
+ facets_rows(facets) >= lqs->rq.entries)) {
+ // stop the data only query
+ usec_t oldest = facets_row_oldest_ut(facets);
+ if(oldest && msg_ut < (oldest - lqs->anchor.delta_ut))
+ break;
+ }
+
+ if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) {
+ status = check_stop(lqs->cancelled, lqs->stop_monotonic_ut);
+
+ if(status == WEVT_OK) {
+ lqs->c.rows_read += row_counter - last_row_counter;
+ last_row_counter = row_counter;
+
+ lqs->c.bytes_read += bytes - last_bytes;
+ last_bytes = bytes;
+
+ send_progress_update(lqs, row_counter, false);
+ }
+ }
+// }
+// else if(sample == SAMPLING_SKIP_FIELDS)
+// facets_row_finished_unsampled(facets, msg_ut);
+// else {
+// sampling_update_running_query_file_estimates(facets, log, lqs, src, msg_ut, FACETS_ANCHOR_DIRECTION_BACKWARD);
+// break;
+// }
+ }
+
+ send_progress_update(lqs, row_counter, true);
+ lqs->c.rows_read += row_counter - last_row_counter;
+ lqs->c.bytes_read += bytes - last_bytes;
+ lqs->c.rows_useful += rows_useful;
+
+ if(errors_no_timestamp)
+ netdata_log_error("WINDOWS-EVENTS: %zu events did not have timestamps", errors_no_timestamp);
+
+ if(latest_msg_ut > lqs->last_modified)
+ lqs->last_modified = latest_msg_ut;
+
+ wevt_query_done(log);
+
+ return status;
+}
+
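+// query one channel from the oldest event to the newest (EvtQueryForwardDirection)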
+static WEVT_QUERY_STATUS wevt_query_forward(
+ WEVT_LOG *log, BUFFER *wb __maybe_unused, FACETS *facets,
+ LOGS_QUERY_SOURCE *src,
+ LOGS_QUERY_STATUS *lqs)
+{
+ usec_t start_ut = lqs->query.start_ut;
+ usec_t stop_ut = lqs->query.stop_ut;
+ bool stop_when_full = lqs->query.stop_when_full;
+
+// lqs->c.query_file.start_ut = start_ut;
+// lqs->c.query_file.stop_ut = stop_ut;
+
+ if(!wevt_query(log, channel2unicode(src->fullname), lqs->c.query, EvtQueryForwardDirection))
+ return WEVT_FAILED_TO_SEEK;
+
+ size_t errors_no_timestamp = 0;
+ usec_t latest_msg_ut = 0; // the biggest timestamp we have seen so far
+ usec_t first_msg_ut = 0; // the first message we got from the db
+ size_t row_counter = 0, last_row_counter = 0, rows_useful = 0;
+ size_t bytes = 0, last_bytes = 0;
+
+ usec_t last_usec_from = 0;
+ usec_t last_usec_to = 0;
+
+ WEVT_QUERY_STATUS status = WEVT_OK;
+
+ facets_rows_begin(facets);
+ WEVT_EVENT e;
+ while (status == WEVT_OK && wevt_get_next_event(log, &e)) {
+ usec_t msg_ut = e.created_ns / NSEC_PER_USEC;
+
+ if(unlikely(!msg_ut)) {
+ errors_no_timestamp++;
+ continue;
+ }
+
+ if (unlikely(msg_ut < start_ut))
+ continue;
+
+ if (unlikely(msg_ut > stop_ut))
+ break;
+
+ if(likely(msg_ut > latest_msg_ut))
+ latest_msg_ut = msg_ut;
+
+ if(unlikely(!first_msg_ut)) {
+ first_msg_ut = msg_ut;
+ // lqs->c.query_file.first_msg_ut = msg_ut;
+ }
+
+// sampling_t sample = is_row_in_sample(log, lqs, src, msg_ut,
+// FACETS_ANCHOR_DIRECTION_FORWARD,
+// facets_row_candidate_to_keep(facets, msg_ut));
+//
+// if(sample == SAMPLING_FULL) {
+ bytes += wevt_process_event(log, facets, src, &msg_ut, &e);
+
+ // make sure each line gets a unique timestamp
+ if(unlikely(msg_ut >= last_usec_from && msg_ut <= last_usec_to))
+ msg_ut = ++last_usec_to;
+ else
+ last_usec_from = last_usec_to = msg_ut;
+
+ if(facets_row_finished(facets, msg_ut))
+ rows_useful++;
+
+ row_counter++;
+ if(unlikely((row_counter % FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS) == 0 &&
+ stop_when_full &&
+ facets_rows(facets) >= lqs->rq.entries)) {
+ // stop the data only query
+ usec_t newest = facets_row_newest_ut(facets);
+ if(newest && msg_ut > (newest + lqs->anchor.delta_ut))
+ break;
+ }
+
+ if(unlikely(row_counter % FUNCTION_PROGRESS_EVERY_ROWS == 0)) {
+ status = check_stop(lqs->cancelled, lqs->stop_monotonic_ut);
+
+ if(status == WEVT_OK) {
+ lqs->c.rows_read += row_counter - last_row_counter;
+ last_row_counter = row_counter;
+
+ lqs->c.bytes_read += bytes - last_bytes;
+ last_bytes = bytes;
+
+ send_progress_update(lqs, row_counter, false);
+ }
+ }
+// }
+// else if(sample == SAMPLING_SKIP_FIELDS)
+// facets_row_finished_unsampled(facets, msg_ut);
+// else {
+// sampling_update_running_query_file_estimates(facets, log, lqs, src, msg_ut, FACETS_ANCHOR_DIRECTION_FORWARD);
+// break;
+// }
+ }
+
+ send_progress_update(lqs, row_counter, true);
+ lqs->c.rows_read += row_counter - last_row_counter;
+ lqs->c.bytes_read += bytes - last_bytes;
+ lqs->c.rows_useful += rows_useful;
+
+ if(errors_no_timestamp)
+ netdata_log_error("WINDOWS-EVENTS: %zu events did not have timestamps", errors_no_timestamp);
+
+ if(latest_msg_ut > lqs->last_modified)
+ lqs->last_modified = latest_msg_ut;
+
+ wevt_query_done(log);
+
+ return status;
+}
+
+static WEVT_QUERY_STATUS wevt_query_one_channel(
+ WEVT_LOG *log,
+ BUFFER *wb, FACETS *facets,
+ LOGS_QUERY_SOURCE *src,
+ LOGS_QUERY_STATUS *lqs) {
+
+ errno_clear();
+
+ WEVT_QUERY_STATUS status;
+ if(lqs->rq.direction == FACETS_ANCHOR_DIRECTION_FORWARD)
+ status = wevt_query_forward(log, wb, facets, src, lqs);
+ else
+ status = wevt_query_backward(log, wb, facets, src, lqs);
+
+ return status;
+}
+
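+// true when the source matches the requested source types/names and its
+// retention overlaps the requested time window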
+static bool source_is_mine(LOGS_QUERY_SOURCE *src, LOGS_QUERY_STATUS *lqs) {
+ if(
+ // no source is requested
+ (lqs->rq.source_type == WEVTS_NONE && !lqs->rq.sources) ||
+
+ // matches our internal source types
+ (src->source_type & lqs->rq.source_type) ||
+
+ // matches the source name
+ (lqs->rq.sources && src->source && simple_pattern_matches(lqs->rq.sources, string2str(src->source))) ||
+
+ // matches the provider (providers start with a special prefix to avoid mix and match)
+ (lqs->rq.sources && src->provider && simple_pattern_matches(lqs->rq.sources, string2str(src->provider)))
+
+ ) {
+
+ if(!src->msg_last_ut)
+ // the file is not scanned yet, or the timestamps have not been updated,
+ // so we don't know if it can contribute or not - let's add it.
+ return true;
+
+ usec_t anchor_delta = ANCHOR_DELTA_UT;
+ usec_t first_ut = src->msg_first_ut - anchor_delta;
+ usec_t last_ut = src->msg_last_ut + anchor_delta;
+
+ if(last_ut >= lqs->rq.after_ut && first_ut <= lqs->rq.before_ut)
+ return true;
+ }
+
+ return false;
+}
+
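+// the entry point of the query: scan the sources, select the matching channels,
+// query each of them and build the response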
+static int wevt_master_query(BUFFER *wb __maybe_unused, LOGS_QUERY_STATUS *lqs __maybe_unused) {
+ // make sure the sources list is updated
+ wevt_sources_scan();
+
+ lqs->c.query = wevt_generate_query_no_xpath(lqs, wb);
+ if(!lqs->c.query)
+ return rrd_call_function_error(wb, "failed to generate query", HTTP_RESP_INTERNAL_SERVER_ERROR);
+
+ FACETS *facets = lqs->facets;
+
+ WEVT_QUERY_STATUS status = WEVT_NO_CHANNEL_MATCHED;
+
+ lqs->c.files_matched = 0;
+ lqs->c.file_working = 0;
+ lqs->c.rows_useful = 0;
+ lqs->c.rows_read = 0;
+ lqs->c.bytes_read = 0;
+
+ size_t files_used = 0;
+ size_t files_max = dictionary_entries(wevt_sources);
+ const DICTIONARY_ITEM *file_items[files_max];
+
+ // count the files
+ bool files_are_newer = false;
+ LOGS_QUERY_SOURCE *src;
+ dfe_start_read(wevt_sources, src) {
+ if(!source_is_mine(src, lqs))
+ continue;
+
+ file_items[files_used++] = dictionary_acquired_item_dup(wevt_sources, src_dfe.item);
+
+ if(src->msg_last_ut > lqs->rq.if_modified_since)
+ files_are_newer = true;
+
+ lqs->c.progress.entries.total += src->entries;
+ }
+ dfe_done(jf);
+
+ lqs->c.files_matched = files_used;
+
+ if(lqs->rq.if_modified_since && !files_are_newer) {
+ // release the files
+ for(size_t f = 0; f < files_used ;f++)
+ dictionary_acquired_item_release(wevt_sources, file_items[f]);
+
+ return rrd_call_function_error(wb, "not modified", HTTP_RESP_NOT_MODIFIED);
+ }
+
+ // sort the files, so that they are optimal for facets
+ if(files_used >= 2) {
+ if (lqs->rq.direction == FACETS_ANCHOR_DIRECTION_BACKWARD)
+ qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *),
+ wevt_sources_dict_items_backward_compar);
+ else
+ qsort(file_items, files_used, sizeof(const DICTIONARY_ITEM *),
+ wevt_sources_dict_items_forward_compar);
+ }
+
+ bool partial = false;
+ usec_t query_started_ut = now_monotonic_usec();
+ usec_t started_ut = query_started_ut;
+ usec_t ended_ut = started_ut;
+ usec_t duration_ut, max_duration_ut = 0;
+
+ WEVT_LOG *log = wevt_openlog6(query_has_fts(lqs) ? WEVT_QUERY_FTS : WEVT_QUERY_NORMAL);
+ if(!log) {
+ // release the files
+ for(size_t f = 0; f < files_used ;f++)
+ dictionary_acquired_item_release(wevt_sources, file_items[f]);
+
+ netdata_log_error("WINDOWS EVENTS: cannot open windows event log");
+ return rrd_call_function_error(wb, "cannot open windows events log", HTTP_RESP_INTERNAL_SERVER_ERROR);
+ }
+
+ // sampling_query_init(lqs, facets);
+
+ buffer_json_member_add_array(wb, "_channels");
+ for(size_t f = 0; f < files_used ;f++) {
+ const char *fullname = dictionary_acquired_item_name(file_items[f]);
+ src = dictionary_acquired_item_value(file_items[f]);
+
+ if(!source_is_mine(src, lqs))
+ continue;
+
+ started_ut = ended_ut;
+
+ // do not even try to do the query if we expect it to pass the timeout
+ if(ended_ut + max_duration_ut * 3 >= *lqs->stop_monotonic_ut) {
+ partial = true;
+ status = WEVT_TIMED_OUT;
+ break;
+ }
+
+ lqs->c.file_working++;
+
+ size_t rows_useful = lqs->c.rows_useful;
+ size_t rows_read = lqs->c.rows_read;
+ size_t bytes_read = lqs->c.bytes_read;
+ size_t matches_setup_ut = lqs->c.matches_setup_ut;
+
+ // sampling_file_init(lqs, src);
+
+ lqs->c.progress.entries.current_query_total = src->entries;
+ WEVT_QUERY_STATUS tmp_status = wevt_query_one_channel(log, wb, facets, src, lqs);
+
+ rows_useful = lqs->c.rows_useful - rows_useful;
+ rows_read = lqs->c.rows_read - rows_read;
+ bytes_read = lqs->c.bytes_read - bytes_read;
+ matches_setup_ut = lqs->c.matches_setup_ut - matches_setup_ut;
+
+ ended_ut = now_monotonic_usec();
+ duration_ut = ended_ut - started_ut;
+
+ if(duration_ut > max_duration_ut)
+ max_duration_ut = duration_ut;
+
+ buffer_json_add_array_item_object(wb); // channel source
+ {
+ // information about the file
+ buffer_json_member_add_string(wb, "_name", fullname);
+ buffer_json_member_add_uint64(wb, "_source_type", src->source_type);
+ buffer_json_member_add_string(wb, "_source", string2str(src->source));
+ buffer_json_member_add_uint64(wb, "_msg_first_ut", src->msg_first_ut);
+ buffer_json_member_add_uint64(wb, "_msg_last_ut", src->msg_last_ut);
+
+ // information about the current use of the file
+ buffer_json_member_add_uint64(wb, "duration_ut", ended_ut - started_ut);
+ buffer_json_member_add_uint64(wb, "rows_read", rows_read);
+ buffer_json_member_add_uint64(wb, "rows_useful", rows_useful);
+ buffer_json_member_add_double(wb, "rows_per_second", (double) rows_read / (double) duration_ut * (double) USEC_PER_SEC);
+ buffer_json_member_add_uint64(wb, "bytes_read", bytes_read);
+ buffer_json_member_add_double(wb, "bytes_per_second", (double) bytes_read / (double) duration_ut * (double) USEC_PER_SEC);
+ buffer_json_member_add_uint64(wb, "duration_matches_ut", matches_setup_ut);
+
+ // if(lqs->rq.sampling) {
+ // buffer_json_member_add_object(wb, "_sampling");
+ // {
+ // buffer_json_member_add_uint64(wb, "sampled", lqs->c.samples_per_file.sampled);
+ // buffer_json_member_add_uint64(wb, "unsampled", lqs->c.samples_per_file.unsampled);
+ // buffer_json_member_add_uint64(wb, "estimated", lqs->c.samples_per_file.estimated);
+ // }
+ // buffer_json_object_close(wb); // _sampling
+ // }
+ }
+ buffer_json_object_close(wb); // channel source
+
+ bool stop = false;
+ switch(tmp_status) {
+ case WEVT_OK:
+ case WEVT_NO_CHANNEL_MATCHED:
+ status = (status == WEVT_OK) ? WEVT_OK : tmp_status;
+ break;
+
+ case WEVT_FAILED_TO_OPEN:
+ case WEVT_FAILED_TO_SEEK:
+ partial = true;
+ if(status == WEVT_NO_CHANNEL_MATCHED)
+ status = tmp_status;
+ break;
+
+ case WEVT_CANCELLED:
+ case WEVT_TIMED_OUT:
+ partial = true;
+ stop = true;
+ status = tmp_status;
+ break;
+
+ case WEVT_NOT_MODIFIED:
+ internal_fatal(true, "this should never be returned here");
+ break;
+ }
+
+ if(stop)
+ break;
+ }
+ buffer_json_array_close(wb); // _channels
+
+ // release the files
+ for(size_t f = 0; f < files_used ;f++)
+ dictionary_acquired_item_release(wevt_sources, file_items[f]);
+
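+ // translate the aggregate query status into an HTTP response; only OK, TIMED_OUT and NO_CHANNEL_MATCHED proceed to build the normal (possibly partial) reply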
+ switch (status) {
+ case WEVT_OK:
+ if(lqs->rq.if_modified_since && !lqs->c.rows_useful)
+ return rrd_call_function_error(wb, "no useful logs, not modified", HTTP_RESP_NOT_MODIFIED);
+ break;
+
+ case WEVT_TIMED_OUT:
+ case WEVT_NO_CHANNEL_MATCHED:
+ break;
+
+ case WEVT_CANCELLED:
+ return rrd_call_function_error(wb, "client closed connection", HTTP_RESP_CLIENT_CLOSED_REQUEST);
+
+ case WEVT_NOT_MODIFIED:
+ return rrd_call_function_error(wb, "not modified", HTTP_RESP_NOT_MODIFIED);
+
+ case WEVT_FAILED_TO_OPEN:
+ return rrd_call_function_error(wb, "failed to open event log", HTTP_RESP_INTERNAL_SERVER_ERROR);
+
+ case WEVT_FAILED_TO_SEEK:
+ return rrd_call_function_error(wb, "failed to execute event log query", HTTP_RESP_INTERNAL_SERVER_ERROR);
+
+ default:
+ return rrd_call_function_error(wb, "unknown status", HTTP_RESP_INTERNAL_SERVER_ERROR);
+ }
+
+ buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
+ buffer_json_member_add_boolean(wb, "partial", partial);
+ buffer_json_member_add_string(wb, "type", "table");
+
+ // build a message for the query
+ if(!lqs->rq.data_only) {
+ CLEAN_BUFFER *msg = buffer_create(0, NULL);
+ CLEAN_BUFFER *msg_description = buffer_create(0, NULL);
+ ND_LOG_FIELD_PRIORITY msg_priority = NDLP_INFO;
+
+ // if(!journal_files_completed_once()) {
+ // buffer_strcat(msg, "Journals are still being scanned. ");
+ // buffer_strcat(msg_description
+ // , "LIBRARY SCAN: The journal files are still being scanned, you are probably viewing incomplete data. ");
+ // msg_priority = NDLP_WARNING;
+ // }
+
+ if(partial) {
+ buffer_strcat(msg, "Query timed-out, incomplete data. ");
+ buffer_strcat(msg_description
+ , "QUERY TIMEOUT: The query timed out and may not include all the data of the selected window. ");
+ msg_priority = NDLP_WARNING;
+ }
+
+ // if(lqs->c.samples.estimated || lqs->c.samples.unsampled) {
+ // double percent = (double) (lqs->c.samples.sampled * 100.0 /
+ // (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled));
+ // buffer_sprintf(msg, "%.2f%% real data", percent);
+ // buffer_sprintf(msg_description, "ACTUAL DATA: The filters counters reflect %0.2f%% of the data. ", percent);
+ // msg_priority = MIN(msg_priority, NDLP_NOTICE);
+ // }
+ //
+ // if(lqs->c.samples.unsampled) {
+ // double percent = (double) (lqs->c.samples.unsampled * 100.0 /
+ // (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled));
+ // buffer_sprintf(msg, ", %.2f%% unsampled", percent);
+ // buffer_sprintf(msg_description
+ // , "UNSAMPLED DATA: %0.2f%% of the events exist and have been counted, but their values have not been evaluated, so they are not included in the filters counters. "
+ // , percent);
+ // msg_priority = MIN(msg_priority, NDLP_NOTICE);
+ // }
+ //
+ // if(lqs->c.samples.estimated) {
+ // double percent = (double) (lqs->c.samples.estimated * 100.0 /
+ // (lqs->c.samples.estimated + lqs->c.samples.unsampled + lqs->c.samples.sampled));
+ // buffer_sprintf(msg, ", %.2f%% estimated", percent);
+ // buffer_sprintf(msg_description
+ // , "ESTIMATED DATA: The query selected a large amount of data, so to avoid delaying too much, the presented data are estimated by %0.2f%%. "
+ // , percent);
+ // msg_priority = MIN(msg_priority, NDLP_NOTICE);
+ // }
+
+ buffer_json_member_add_object(wb, "message");
+ if(buffer_strlen(msg)) {
+ buffer_json_member_add_string(wb, "title", buffer_tostring(msg));
+ buffer_json_member_add_string(wb, "description", buffer_tostring(msg_description));
+ buffer_json_member_add_string(wb, "status", nd_log_id2priority(msg_priority));
+ }
+ // else send an empty object if there is nothing to tell
+ buffer_json_object_close(wb); // message
+ }
+
+ if(!lqs->rq.data_only) {
+ buffer_json_member_add_time_t(wb, "update_every", 1);
+ buffer_json_member_add_string(wb, "help", WEVT_FUNCTION_DESCRIPTION);
+ }
+
+ if(!lqs->rq.data_only || lqs->rq.tail)
+ buffer_json_member_add_uint64(wb, "last_modified", lqs->last_modified);
+
+ facets_sort_and_reorder_keys(facets);
+ facets_report(facets, wb, used_hashes_registry);
+
+ wb->expires = now_realtime_sec() + (lqs->rq.data_only ? 3600 : 0);
+ buffer_json_member_add_time_t(wb, "expires", wb->expires);
+
+ // if(lqs->rq.sampling) {
+ // buffer_json_member_add_object(wb, "_sampling");
+ // {
+ // buffer_json_member_add_uint64(wb, "sampled", lqs->c.samples.sampled);
+ // buffer_json_member_add_uint64(wb, "unsampled", lqs->c.samples.unsampled);
+ // buffer_json_member_add_uint64(wb, "estimated", lqs->c.samples.estimated);
+ // }
+ // buffer_json_object_close(wb); // _sampling
+ // }
+
+ wevt_closelog6(log);
+
+ wb->content_type = CT_APPLICATION_JSON;
+ wb->response_code = HTTP_RESP_OK;
+ return wb->response_code;
+}
+
+void function_windows_events(const char *transaction, char *function, usec_t *stop_monotonic_ut, bool *cancelled,
+ BUFFER *payload, HTTP_ACCESS access __maybe_unused,
+ const char *source __maybe_unused, void *data __maybe_unused) {
+ bool have_slice = LQS_DEFAULT_SLICE_MODE;
+
+ LOGS_QUERY_STATUS tmp_fqs = {
+ .facets = lqs_facets_create(
+ LQS_DEFAULT_ITEMS_PER_QUERY,
+ FACETS_OPTION_ALL_KEYS_FTS | FACETS_OPTION_HASH_IDS,
+ WEVT_ALWAYS_VISIBLE_KEYS,
+ WEVT_KEYS_INCLUDED_IN_FACETS,
+ WEVT_KEYS_EXCLUDED_FROM_FACETS,
+ have_slice),
+
+ .rq = LOGS_QUERY_REQUEST_DEFAULTS(transaction, have_slice, FACETS_ANCHOR_DIRECTION_BACKWARD),
+
+ .cancelled = cancelled,
+ .stop_monotonic_ut = stop_monotonic_ut,
+ };
+ LOGS_QUERY_STATUS *lqs = &tmp_fqs;
+
+ CLEAN_BUFFER *wb = lqs_create_output_buffer();
+
+ // ------------------------------------------------------------------------
+ // parse the parameters
+
+ if(lqs_request_parse_and_validate(lqs, wb, function, payload, have_slice, WEVT_FIELD_LEVEL)) {
+ wevt_register_fields(lqs);
+
+ // ------------------------------------------------------------------------
+ // add versions to the response
+
+ buffer_json_wevt_versions(wb);
+
+ // ------------------------------------------------------------------------
+ // run the request
+
+ if (lqs->rq.info)
+ lqs_info_response(wb, lqs->facets);
+ else {
+ wevt_master_query(wb, lqs);
+ if (wb->response_code == HTTP_RESP_OK)
+ buffer_json_finalize(wb);
+ }
+ }
+
+ netdata_mutex_lock(&stdout_mutex);
+ pluginsd_function_result_to_stdout(transaction, wb);
+ netdata_mutex_unlock(&stdout_mutex);
+
+ lqs_cleanup(lqs);
+}
+
+int main(int argc __maybe_unused, char **argv __maybe_unused) {
+ nd_thread_tag_set("wevt.plugin");
+ nd_log_initialize_for_external_plugins("windows-events.plugin");
+
+ // ------------------------------------------------------------------------
+ // initialization
+
+ wevt_sources_init();
+ provider_cache_init();
+ cached_sid_username_init();
+ field_cache_init();
+
+ if(!EnableWindowsPrivilege(SE_SECURITY_NAME))
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_SECURITY_NAME);
+
+ if(!EnableWindowsPrivilege(SE_BACKUP_NAME))
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_BACKUP_NAME);
+
+ if(!EnableWindowsPrivilege(SE_AUDIT_NAME))
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to enable %s privilege", SE_AUDIT_NAME);
+
+ // ------------------------------------------------------------------------
+ // debug
+
+ if(argc >= 2 && strcmp(argv[argc - 1], "debug") == 0) {
+ wevt_sources_scan();
+
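+ // canned queries, executed synchronously below; handy for exercising function_windows_events() without a running Netdata agent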
+ struct {
+ const char *func;
+ } array[] = {
+ { "windows-events after:-8640000 before:0 last:200 source:All" },
+ //{ "windows-events after:-86400 before:0 direction:backward last:200 facets:HdUoSYab5wV,Cq2r7mRUv4a,LAnVlsIQfeD,BnPLNbA5VWT,KeCITtVD5AD,HytMJ9kj82B,JM3OPW3kHn6,H106l8MXSSr,HREiMN.4Ahu,ClaDGnYSQE7,ApYltST_icg,PtkRm91M0En data_only:false slice:true source:All" },
+ //{ "windows-events after:1726055370 before:1726056270 direction:backward last:200 facets:HdUoSYab5wV,Cq2r7mRUv4a,LAnVlsIQfeD,BnPLNbA5VWT,KeCITtVD5AD,HytMJ9kj82B,LT.Xp9I9tiP,No4kPTQbS.g,LQ2LQzfE8EG,PtkRm91M0En,JM3OPW3kHn6,ClaDGnYSQE7,H106l8MXSSr,HREiMN.4Ahu data_only:false source:All HytMJ9kj82B:BlC24d5JBBV,PtVoyIuX.MU,HMj1B38kHTv KeCITtVD5AD:PY1JtCeWwSe,O9kz5J37nNl,JZoJURadhDb" },
+ // { "windows-events after:1725636012 before:1726240812 direction:backward last:200 facets:HdUoSYab5wV,Cq2r7mRUv4a,LAnVlsIQfeD,BnPLNbA5VWT,KeCITtVD5AD,HytMJ9kj82B,JM3OPW3kHn6,H106l8MXSSr,HREiMN.4Ahu,ClaDGnYSQE7,ApYltST_icg,PtkRm91M0En data_only:false source:All PtkRm91M0En:LDzHbP5libb" },
+ //{ "windows-events after:1725650386 before:1725736786 anchor:1725652420809461 direction:forward last:200 facets:HWNGeY7tg6c,LAnVlsIQfeD,BnPLNbA5VWT,Cq2r7mRUv4a,KeCITtVD5AD,I_Amz_APBm3,HytMJ9kj82B,LT.Xp9I9tiP,No4kPTQbS.g,LQ2LQzfE8EG,PtkRm91M0En,JM3OPW3kHn6 if_modified_since:1725736649011085 data_only:true delta:true tail:true source:all Cq2r7mRUv4a:PPc9fUy.q6o No4kPTQbS.g:Dwo9PhK27v3 HytMJ9kj82B:KbbznGjt_9r LAnVlsIQfeD:OfU1t5cpjgG JM3OPW3kHn6:CS_0g5AEpy2" },
+ //{ "windows-events info after:1725650420 before:1725736820" },
+ //{ "windows-events after:1725650420 before:1725736820 last:200 facets:HWNGeY7tg6c,LAnVlsIQfeD,BnPLNbA5VWT,Cq2r7mRUv4a,KeCITtVD5AD,I_Amz_APBm3,HytMJ9kj82B,LT.Xp9I9tiP,No4kPTQbS.g,LQ2LQzfE8EG,PtkRm91M0En,JM3OPW3kHn6 source:all Cq2r7mRUv4a:PPc9fUy.q6o No4kPTQbS.g:Dwo9PhK27v3 HytMJ9kj82B:KbbznGjt_9r LAnVlsIQfeD:OfU1t5cpjgG JM3OPW3kHn6:CS_0g5AEpy2" },
+ //{ "windows-events after:1725650430 before:1725736830 last:200 facets:HWNGeY7tg6c,LAnVlsIQfeD,BnPLNbA5VWT,Cq2r7mRUv4a,KeCITtVD5AD,I_Amz_APBm3,HytMJ9kj82B,LT.Xp9I9tiP,No4kPTQbS.g,LQ2LQzfE8EG,PtkRm91M0En,JM3OPW3kHn6 source:all Cq2r7mRUv4a:PPc9fUy.q6o No4kPTQbS.g:Dwo9PhK27v3 HytMJ9kj82B:KbbznGjt_9r LAnVlsIQfeD:OfU1t5cpjgG JM3OPW3kHn6:CS_0g5AEpy2" },
+ { NULL },
+ };
+
+ for(int i = 0; array[i].func ;i++) {
+ bool cancelled = false;
+ usec_t stop_monotonic_ut = now_monotonic_usec() + 600 * USEC_PER_SEC;
+ //char buf[] = "windows-events after:-86400 before:0 direction:backward last:200 data_only:false slice:true source:all";
+ function_windows_events("123", (char *)array[i].func, &stop_monotonic_ut, &cancelled, NULL, HTTP_ACCESS_ALL, NULL, NULL);
+ }
+ printf("\n\nAll done!\n\n");
+ fflush(stdout);
+ exit(1);
+ }
+
+ // ------------------------------------------------------------------------
+ // the event loop for functions
+
+ struct functions_evloop_globals *wg =
+ functions_evloop_init(WINDOWS_EVENTS_WORKER_THREADS, "WEVT", &stdout_mutex, &plugin_should_exit);
+
+ functions_evloop_add_function(wg,
+ WEVT_FUNCTION_NAME,
+ function_windows_events,
+ WINDOWS_EVENTS_DEFAULT_TIMEOUT,
+ NULL);
+
+ // ------------------------------------------------------------------------
+ // register functions to netdata
+
+ netdata_mutex_lock(&stdout_mutex);
+
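+ // one FUNCTION GLOBAL line: function name, timeout, description, the "logs" tag, the required access bits and the priority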
+ fprintf(stdout, PLUGINSD_KEYWORD_FUNCTION " GLOBAL \"%s\" %d \"%s\" \"logs\" "HTTP_ACCESS_FORMAT" %d\n",
+ WEVT_FUNCTION_NAME, WINDOWS_EVENTS_DEFAULT_TIMEOUT, WEVT_FUNCTION_DESCRIPTION,
+ (HTTP_ACCESS_FORMAT_CAST)(HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_SENSITIVE_DATA),
+ RRDFUNCTIONS_PRIORITY_DEFAULT);
+
+ fflush(stdout);
+ netdata_mutex_unlock(&stdout_mutex);
+
+ // ------------------------------------------------------------------------
+
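+ // main loop: rescan the available event log sources periodically, release idle provider handles, and keep emitting newlines so the agent knows the plugin is alive when stdout is a pipe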
+ usec_t send_newline_ut = 0;
+ usec_t since_last_scan_ut = WINDOWS_EVENTS_SCAN_EVERY_USEC * 2; // something big to trigger scanning at start
+ usec_t since_last_providers_release_ut = 0;
+ const bool tty = isatty(fileno(stdout)) == 1;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb, USEC_PER_SEC);
+ while(!plugin_should_exit) {
+
+ if(since_last_scan_ut > WINDOWS_EVENTS_SCAN_EVERY_USEC) {
+ wevt_sources_scan();
+ since_last_scan_ut = 0;
+ }
+
+ if(since_last_providers_release_ut > WINDOWS_EVENTS_RELEASE_PROVIDERS_HANDLES_EVERY_UT) {
+ providers_release_unused_handles();
+ since_last_providers_release_ut = 0;
+ }
+
+ usec_t dt_ut = heartbeat_next(&hb);
+ since_last_providers_release_ut += dt_ut;
+ since_last_scan_ut += dt_ut;
+ send_newline_ut += dt_ut;
+
+ if(!tty && send_newline_ut > USEC_PER_SEC) {
+ send_newline_and_flush(&stdout_mutex);
+ send_newline_ut = 0;
+ }
+ }
+
+ exit(0);
+}
diff --git a/src/collectors/windows-events.plugin/windows-events.h b/src/collectors/windows-events.plugin/windows-events.h
new file mode 100644
index 000000000..34d600a98
--- /dev/null
+++ b/src/collectors/windows-events.plugin/windows-events.h
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WINDOWS_EVENTS_H
+#define NETDATA_WINDOWS_EVENTS_H
+
+#include "libnetdata/libnetdata.h"
+#include "collectors/all.h"
+
+typedef enum {
+ WEVT_NO_CHANNEL_MATCHED,
+ WEVT_FAILED_TO_OPEN,
+ WEVT_FAILED_TO_SEEK,
+ WEVT_TIMED_OUT,
+ WEVT_OK,
+ WEVT_NOT_MODIFIED,
+ WEVT_CANCELLED,
+} WEVT_QUERY_STATUS;
+
+#define WEVT_CHANNEL_CLASSIC_TRACE 0x0
+#define WEVT_CHANNEL_GLOBAL_SYSTEM 0x8
+#define WEVT_CHANNEL_GLOBAL_APPLICATION 0x9
+#define WEVT_CHANNEL_GLOBAL_SECURITY 0xa
+
+#define WEVT_LEVEL_NONE 0x0
+#define WEVT_LEVEL_CRITICAL 0x1
+#define WEVT_LEVEL_ERROR 0x2
+#define WEVT_LEVEL_WARNING 0x3
+#define WEVT_LEVEL_INFORMATION 0x4
+#define WEVT_LEVEL_VERBOSE 0x5
+#define WEVT_LEVEL_RESERVED_6 0x6
+#define WEVT_LEVEL_RESERVED_7 0x7
+#define WEVT_LEVEL_RESERVED_8 0x8
+#define WEVT_LEVEL_RESERVED_9 0x9
+#define WEVT_LEVEL_RESERVED_10 0xa
+#define WEVT_LEVEL_RESERVED_11 0xb
+#define WEVT_LEVEL_RESERVED_12 0xc
+#define WEVT_LEVEL_RESERVED_13 0xd
+#define WEVT_LEVEL_RESERVED_14 0xe
+#define WEVT_LEVEL_RESERVED_15 0xf
+
+#define WEVT_OPCODE_INFO 0x0
+#define WEVT_OPCODE_START 0x1
+#define WEVT_OPCODE_STOP 0x2
+#define WEVT_OPCODE_DC_START 0x3
+#define WEVT_OPCODE_DC_STOP 0x4
+#define WEVT_OPCODE_EXTENSION 0x5
+#define WEVT_OPCODE_REPLY 0x6
+#define WEVT_OPCODE_RESUME 0x7
+#define WEVT_OPCODE_SUSPEND 0x8
+#define WEVT_OPCODE_SEND 0x9
+#define WEVT_OPCODE_RECEIVE 0xf0
+#define WEVT_OPCODE_RESERVED_241 0xf1
+#define WEVT_OPCODE_RESERVED_242 0xf2
+#define WEVT_OPCODE_RESERVED_243 0xf3
+#define WEVT_OPCODE_RESERVED_244 0xf4
+#define WEVT_OPCODE_RESERVED_245 0xf5
+#define WEVT_OPCODE_RESERVED_246 0xf6
+#define WEVT_OPCODE_RESERVED_247 0xf7
+#define WEVT_OPCODE_RESERVED_248 0xf8
+#define WEVT_OPCODE_RESERVED_249 0xf9
+#define WEVT_OPCODE_RESERVED_250 0xfa
+#define WEVT_OPCODE_RESERVED_251 0xfb
+#define WEVT_OPCODE_RESERVED_252 0xfc
+#define WEVT_OPCODE_RESERVED_253 0xfd
+#define WEVT_OPCODE_RESERVED_254 0xfe
+#define WEVT_OPCODE_RESERVED_255 0xff
+
+#define WEVT_TASK_NONE 0x0
+
+#define WEVT_KEYWORD_NONE 0x0
+#define WEVT_KEYWORD_RESPONSE_TIME 0x0001000000000000
+#define WEVT_KEYWORD_WDI_CONTEXT 0x0002000000000000
+#define WEVT_KEYWORD_WDI_DIAG 0x0004000000000000
+#define WEVT_KEYWORD_SQM 0x0008000000000000
+#define WEVT_KEYWORD_AUDIT_FAILURE 0x0010000000000000
+#define WEVT_KEYWORD_AUDIT_SUCCESS 0x0020000000000000
+#define WEVT_KEYWORD_CORRELATION_HINT 0x0040000000000000
+#define WEVT_KEYWORD_EVENTLOG_CLASSIC 0x0080000000000000
+#define WEVT_KEYWORD_RESERVED_56 0x0100000000000000
+#define WEVT_KEYWORD_RESERVED_57 0x0200000000000000
+#define WEVT_KEYWORD_RESERVED_58 0x0400000000000000
+#define WEVT_KEYWORD_RESERVED_59 0x0800000000000000
+#define WEVT_KEYWORD_RESERVED_60 0x1000000000000000
+#define WEVT_KEYWORD_RESERVED_61 0x2000000000000000
+#define WEVT_KEYWORD_RESERVED_62 0x4000000000000000
+#define WEVT_KEYWORD_RESERVED_63 0x8000000000000000
+
+#define WEVT_LEVEL_NAME_NONE "None"
+#define WEVT_LEVEL_NAME_CRITICAL "Critical"
+#define WEVT_LEVEL_NAME_ERROR "Error"
+#define WEVT_LEVEL_NAME_WARNING "Warning"
+#define WEVT_LEVEL_NAME_INFORMATION "Information"
+#define WEVT_LEVEL_NAME_VERBOSE "Verbose"
+
+#define WEVT_OPCODE_NAME_INFO "Info"
+#define WEVT_OPCODE_NAME_START "Start"
+#define WEVT_OPCODE_NAME_STOP "Stop"
+#define WEVT_OPCODE_NAME_DC_START "DC Start"
+#define WEVT_OPCODE_NAME_DC_STOP "DC Stop"
+#define WEVT_OPCODE_NAME_EXTENSION "Extension"
+#define WEVT_OPCODE_NAME_REPLY "Reply"
+#define WEVT_OPCODE_NAME_RESUME "Resume"
+#define WEVT_OPCODE_NAME_SUSPEND "Suspend"
+#define WEVT_OPCODE_NAME_SEND "Send"
+#define WEVT_OPCODE_NAME_RECEIVE "Receive"
+
+#define WEVT_TASK_NAME_NONE "None"
+
+#define WEVT_KEYWORD_NAME_NONE "None"
+#define WEVT_KEYWORD_NAME_RESPONSE_TIME "Response Time"
+#define WEVT_KEYWORD_NAME_WDI_CONTEXT "WDI Context"
+#define WEVT_KEYWORD_NAME_WDI_DIAG "WDI Diagnostics"
+#define WEVT_KEYWORD_NAME_SQM "SQM (Software Quality Metrics)"
+#define WEVT_KEYWORD_NAME_AUDIT_FAILURE "Audit Failure"
+#define WEVT_KEYWORD_NAME_AUDIT_SUCCESS "Audit Success"
+#define WEVT_KEYWORD_NAME_CORRELATION_HINT "Correlation Hint"
+#define WEVT_KEYWORD_NAME_EVENTLOG_CLASSIC "Event Log Classic"
+
+#define WEVT_PREFIX_LEVEL "Level " // the space at the end is needed
+#define WEVT_PREFIX_KEYWORDS "Keywords " // the space at the end is needed
+#define WEVT_PREFIX_OPCODE "Opcode " // the space at the end is needed
+#define WEVT_PREFIX_TASK "Task " // the space at the end is needed
+
+#include "windows-events-sources.h"
+#include "windows-events-unicode.h"
+#include "windows-events-xml.h"
+#include "windows-events-providers.h"
+#include "windows-events-fields-cache.h"
+#include "windows-events-query.h"
+
+// enable or disable preloading on full-text-search
+#define ON_FTS_PRELOAD_MESSAGE 1
+#define ON_FTS_PRELOAD_XML 0
+#define ON_FTS_PRELOAD_EVENT_DATA 1
+
+#define WEVT_FUNCTION_DESCRIPTION "View, search and analyze the Microsoft Windows Event Log."
+#define WEVT_FUNCTION_NAME "windows-events"
+
+#define WINDOWS_EVENTS_WORKER_THREADS 5
+#define WINDOWS_EVENTS_DEFAULT_TIMEOUT 600
+#define WINDOWS_EVENTS_SCAN_EVERY_USEC (5 * 60 * USEC_PER_SEC)
+#define WINDOWS_EVENTS_PROGRESS_EVERY_UT (250 * USEC_PER_MS)
+#define FUNCTION_PROGRESS_EVERY_ROWS (2000)
+#define FUNCTION_DATA_ONLY_CHECK_EVERY_ROWS (1000)
+#define ANCHOR_DELTA_UT (10 * USEC_PER_SEC)
+
+// run providers release every 5 mins
+#define WINDOWS_EVENTS_RELEASE_PROVIDERS_HANDLES_EVERY_UT (5 * 60 * USEC_PER_SEC)
+// release idle handles that are older than 5 mins
+#define WINDOWS_EVENTS_RELEASE_IDLE_PROVIDER_HANDLES_TIME_UT (5 * 60 * USEC_PER_SEC)
+
+#define WEVT_FIELD_COMPUTER "Computer"
+#define WEVT_FIELD_CHANNEL "Channel"
+#define WEVT_FIELD_PROVIDER "Provider"
+#define WEVT_FIELD_PROVIDER_GUID "ProviderGUID"
+#define WEVT_FIELD_EVENTRECORDID "EventRecordID"
+#define WEVT_FIELD_VERSION "Version"
+#define WEVT_FIELD_QUALIFIERS "Qualifiers"
+#define WEVT_FIELD_EVENTID "EventID"
+#define WEVT_FIELD_LEVEL "Level"
+#define WEVT_FIELD_KEYWORDS "Keywords"
+#define WEVT_FIELD_OPCODE "Opcode"
+#define WEVT_FIELD_ACCOUNT "UserAccount"
+#define WEVT_FIELD_DOMAIN "UserDomain"
+#define WEVT_FIELD_SID "UserSID"
+#define WEVT_FIELD_TASK "Task"
+#define WEVT_FIELD_PROCESSID "ProcessID"
+#define WEVT_FIELD_THREADID "ThreadID"
+#define WEVT_FIELD_ACTIVITY_ID "ActivityID"
+#define WEVT_FIELD_RELATED_ACTIVITY_ID "RelatedActivityID"
+#define WEVT_FIELD_XML "XML"
+#define WEVT_FIELD_MESSAGE "Message"
+#define WEVT_FIELD_EVENTS_API "EventsAPI"
+#define WEVT_FIELD_EVENT_DATA_HIDDEN "__HIDDEN__EVENT__DATA__"
+#define WEVT_FIELD_EVENT_MESSAGE_HIDDEN "__HIDDEN__MESSAGE__DATA__"
+#define WEVT_FIELD_EVENT_XML_HIDDEN "__HIDDEN__XML__DATA__"
+
+// functions needed by LQS
+
+// structures needed by LQS
+struct lqs_extension {
+ wchar_t *query;
+
+ struct {
+ struct {
+ size_t completed;
+ size_t total;
+ } queries;
+
+ struct {
+ size_t current_query_total;
+ size_t completed;
+ size_t total;
+ } entries;
+
+ usec_t last_ut;
+ } progress;
+
+ // struct {
+ // usec_t start_ut;
+ // usec_t stop_ut;
+ // usec_t first_msg_ut;
+ //
+ // uint64_t first_msg_seqnum;
+ // } query_file;
+
+ // struct {
+ // uint32_t enable_after_samples;
+ // uint32_t slots;
+ // uint32_t sampled;
+ // uint32_t unsampled;
+ // uint32_t estimated;
+ // } samples;
+
+ // struct {
+ // uint32_t enable_after_samples;
+ // uint32_t every;
+ // uint32_t skipped;
+ // uint32_t recalibrate;
+ // uint32_t sampled;
+ // uint32_t unsampled;
+ // uint32_t estimated;
+ // } samples_per_file;
+
+ // struct {
+ // usec_t start_ut;
+ // usec_t end_ut;
+ // usec_t step_ut;
+ // uint32_t enable_after_samples;
+ // uint32_t sampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS];
+ // uint32_t unsampled[SYSTEMD_JOURNAL_SAMPLING_SLOTS];
+ // } samples_per_time_slot;
+
+ // per file progress info
+ // size_t cached_count;
+
+ // progress statistics
+ usec_t matches_setup_ut;
+ size_t rows_useful;
+ size_t rows_read;
+ size_t bytes_read;
+ size_t files_matched;
+ size_t file_working;
+};
+
+// prepare LQS
+#define LQS_DEFAULT_SLICE_MODE 0
+#define LQS_FUNCTION_NAME WEVT_FUNCTION_NAME
+#define LQS_FUNCTION_DESCRIPTION WEVT_FUNCTION_DESCRIPTION
+#define LQS_DEFAULT_ITEMS_PER_QUERY 200
+#define LQS_DEFAULT_ITEMS_SAMPLING 1000000
+#define LQS_SOURCE_TYPE WEVT_SOURCE_TYPE
+#define LQS_SOURCE_TYPE_ALL WEVTS_ALL
+#define LQS_SOURCE_TYPE_NONE WEVTS_NONE
+#define LQS_PARAMETER_SOURCE_NAME "Event Channels" // this is how it is shown to users
+#define LQS_FUNCTION_GET_INTERNAL_SOURCE_TYPE(value) WEVT_SOURCE_TYPE_2id_one(value)
+#define LQS_FUNCTION_SOURCE_TO_JSON_ARRAY(wb) wevt_sources_to_json_array(wb)
+#include "libnetdata/facets/logs_query_status.h"
+
+#include "windows-events-query-builder.h" // needs the LQS definition, so it has to be last
+
+#endif //NETDATA_WINDOWS_EVENTS_H
diff --git a/src/collectors/windows.plugin/GetSystemUptime.c b/src/collectors/windows.plugin/GetSystemUptime.c
index 9ed939ca0..59bf9d855 100644
--- a/src/collectors/windows.plugin/GetSystemUptime.c
+++ b/src/collectors/windows.plugin/GetSystemUptime.c
@@ -1,34 +1,34 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "windows_plugin.h"
-#include "windows-internals.h"
-
-int do_GetSystemUptime(int update_every, usec_t dt __maybe_unused) {
- ULONGLONG uptime = GetTickCount64(); // in milliseconds
-
- static RRDSET *st = NULL;
- static RRDDIM *rd_uptime = NULL;
- if (!st) {
- st = rrdset_create_localhost(
- "system"
- , "uptime"
- , NULL
- , "uptime"
- , "system.uptime"
- , "System Uptime"
- , "seconds"
- , PLUGIN_WINDOWS_NAME
- , "GetSystemUptime"
- , NETDATA_CHART_PRIO_SYSTEM_UPTIME
- , update_every
- , RRDSET_TYPE_LINE
- );
-
- rd_uptime = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
- }
-
- rrddim_set_by_pointer(st, rd_uptime, (collected_number)uptime);
- rrdset_done(st);
-
- return 0;
-}
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+int do_GetSystemUptime(int update_every, usec_t dt __maybe_unused) {
+ ULONGLONG uptime = GetTickCount64(); // in milliseconds
+
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_uptime = NULL;
+ if (!st) {
+ st = rrdset_create_localhost(
+ "system"
+ , "uptime"
+ , NULL
+ , "uptime"
+ , "system.uptime"
+ , "System Uptime"
+ , "seconds"
+ , PLUGIN_WINDOWS_NAME
+ , "GetSystemUptime"
+ , NETDATA_CHART_PRIO_SYSTEM_UPTIME
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ rd_uptime = rrddim_add(st, "uptime", NULL, 1, 1000, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(st, rd_uptime, (collected_number)uptime);
+ rrdset_done(st);
+
+ return 0;
+}
diff --git a/src/collectors/windows.plugin/integrations/memory_statistics.md b/src/collectors/windows.plugin/integrations/memory_statistics.md
new file mode 100644
index 000000000..2f67580a6
--- /dev/null
+++ b/src/collectors/windows.plugin/integrations/memory_statistics.md
@@ -0,0 +1,123 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/windows.plugin/integrations/memory_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/windows.plugin/metadata.yaml"
+sidebar_label: "Memory statistics"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Memory statistics
+
+
+<img src="https://netdata.cloud/img/windows.svg" width="150"/>
+
+
+Plugin: windows.plugin
+Module: PerflibMemory
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors swap and memory pool statistics on Windows systems.
+
+
+It queries the 'Memory' object from Perflib in order to gather the metrics.
+
+
+This collector is only supported on the following platforms:
+
+- windows
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector automatically detects all of the metrics; no further configuration is required.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Memory statistics instance
+
+These metrics refer to the entire monitored instance
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mem.swap_iops | read, write | operations/s |
+| mem.swap_pages_io | read, write | pages/s |
+| mem.system_pool_size | paged, pool-paged | bytes |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `netdata.conf`.
+Configuration for this specific integration is located in the `[plugin:windows]` section within that file.
+
+The file format is a modified INI syntax. The general structure is:
+
+```ini
+[section1]
+ option1 = some value
+ option2 = some other value
+
+[section2]
+ option3 = some third value
+```
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config netdata.conf
+```
+#### Options
+
+
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| PerflibMemory | An option to enable or disable the data collection. | yes | no |
+
+#### Examples
+There are no configuration examples.
+
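+That said, as a hand-written illustration (not generated from `metadata.yaml`), enabling or disabling this module in `netdata.conf` would look roughly like this, using the section and option names from the File and Options entries above:
+
+```ini
+[plugin:windows]
+    # set to "no" to disable collection by this module
+    PerflibMemory = yes
+```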
+
diff --git a/src/collectors/windows.plugin/integrations/system_statistics.md b/src/collectors/windows.plugin/integrations/system_statistics.md
new file mode 100644
index 000000000..6df183a7a
--- /dev/null
+++ b/src/collectors/windows.plugin/integrations/system_statistics.md
@@ -0,0 +1,123 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/windows.plugin/integrations/system_statistics.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/windows.plugin/metadata.yaml"
+sidebar_label: "System statistics"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# System statistics
+
+
+<img src="https://netdata.cloud/img/windows.svg" width="150"/>
+
+
+Plugin: windows.plugin
+Module: PerflibProcesses
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the current number of processes, threads, and context switches on Windows systems.
+
+
+It queries the 'System' object from Perflib in order to gather the metrics.
+
+
+This collector is only supported on the following platforms:
+
+- windows
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector automatically detects all of the metrics; no further configuration is required.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per System statistics instance
+
+These metrics refer to the entire monitored instance.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.processes | running | processes |
+| system.threads | threads | threads |
+| system.ctxt | switches | context switches/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `netdata.conf`.
+Configuration for this specific integration is located in the `[plugin:windows]` section within that file.
+
+The file format is a modified INI syntax. The general structure is:
+
+```ini
+[section1]
+ option1 = some value
+ option2 = some other value
+
+[section2]
+ option3 = some third value
+```
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config netdata.conf
+```
+#### Options
+
+
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| PerflibProcesses | An option to enable or disable the data collection. | yes | no |
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/src/collectors/windows.plugin/integrations/system_thermal_zone.md b/src/collectors/windows.plugin/integrations/system_thermal_zone.md
new file mode 100644
index 000000000..6a740b8a0
--- /dev/null
+++ b/src/collectors/windows.plugin/integrations/system_thermal_zone.md
@@ -0,0 +1,121 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/windows.plugin/integrations/system_thermal_zone.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/windows.plugin/metadata.yaml"
+sidebar_label: "System thermal zone"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# System thermal zone
+
+
+<img src="https://netdata.cloud/img/windows.svg" width="150"/>
+
+
+Plugin: windows.plugin
+Module: PerflibThermalZone
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors thermal zone statistics on Windows systems.
+
+
+It queries the 'Thermal Zone Information' object from Perflib in order to gather the metrics.
+
+
+This collector is only supported on the following platforms:
+
+- windows
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector automatically detects all of the metrics; no further configuration is required.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Thermal zone
+
+These metrics refer to a Thermal zone
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| system.thermalzone_temperature | temperature | celsius |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `netdata.conf`.
+Configuration for this specific integration is located in the `[plugin:windows]` section within that file.
+
+The file format is a modified INI syntax. The general structure is:
+
+```ini
+[section1]
+ option1 = some value
+ option2 = some other value
+
+[section2]
+ option3 = some third value
+```
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config netdata.conf
+```
+#### Options
+
+
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| PerflibThermalZone | An option to enable or disable the data collection. | yes | no |
+
+#### Examples
+There are no configuration examples.
+
+
diff --git a/src/collectors/windows.plugin/metadata.yaml b/src/collectors/windows.plugin/metadata.yaml
new file mode 100644
index 000000000..52694e03d
--- /dev/null
+++ b/src/collectors/windows.plugin/metadata.yaml
@@ -0,0 +1,276 @@
+plugin_name: windows.plugin
+modules:
+ - meta:
+ plugin_name: windows.plugin
+ module_name: PerflibProcesses
+ monitored_instance:
+ name: System statistics
+ link: "https://learn.microsoft.com/en-us/windows/win32/procthread/processes-and-threads"
+ categories:
+ - data-collection.windows-systems
+ icon_filename: "windows.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - process counts
+ - threads
+ - context switch
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the current number of processes, threads, and context switches on Windows systems.
+ method_description: |
+ It queries the 'System' object from Perflib in order to gather the metrics.
+ supported_platforms:
+ include: ["windows"]
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ The collector automatically detects all of the metrics; no further configuration is required.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "netdata.conf"
+ section_name: "[plugin:windows]"
+ description: "The Netdata main configuration file"
+ options:
+ description: ""
+ folding:
+ title: "Config option"
+ enabled: false
+ list:
+ - name: PerflibProcesses
+ description: An option to enable or disable the data collection.
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored instance."
+ labels: []
+ metrics:
+ - name: system.processes
+ description: System Processes
+ unit: "processes"
+ chart_type: line
+ dimensions:
+ - name: running
+ - name: system.threads
+ description: System Threads
+ unit: "threads"
+ chart_type: line
+ dimensions:
+ - name: threads
+ - name: system.ctxt
+ description: CPU Context Switches
+ unit: "context switches/s"
+ chart_type: line
+ dimensions:
+ - name: switches
+ - meta:
+ plugin_name: windows.plugin
+ module_name: PerflibMemory
+ monitored_instance:
+ name: Memory statistics
+ link: "https://learn.microsoft.com/en-us/windows/win32/Memory/memory-management"
+ categories:
+ - data-collection.windows-systems
+ icon_filename: "windows.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - memory
+ - swap
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors swap and memory pool statistics on Windows systems.
+ method_description: |
+ It queries the 'Memory' object from Perflib in order to gather the metrics.
+ supported_platforms:
+ include: ["windows"]
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ The collector automatically detects all of the metrics; no further configuration is required.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "netdata.conf"
+ section_name: "[plugin:windows]"
+ description: "The Netdata main configuration file"
+ options:
+ description: ""
+ folding:
+ title: "Config option"
+ enabled: false
+ list:
+ - name: PerflibMemory
+ description: An option to enable or disable the data collection.
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored instance"
+ labels: []
+ metrics:
+ - name: mem.swap_iops
+ description: Swap Operations
+ unit: "operations/s"
+ chart_type: stacked
+ dimensions:
+ - name: read
+ - name: write
+ - name: mem.swap_pages_io
+ description: Swap Pages
+ unit: "pages/s"
+ chart_type: stacked
+ dimensions:
+ - name: read
+ - name: write
+ - name: mem.system_pool_size
+ description: System Memory Pool
+ unit: "bytes"
+ chart_type: stacked
+ dimensions:
+ - name: paged
+ - name: pool-paged
+ - meta:
+ plugin_name: windows.plugin
+ module_name: PerflibThermalZone
+ monitored_instance:
+ name: System thermal zone
+ link: "https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide"
+ categories:
+ - data-collection.windows-systems
+ icon_filename: "windows.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - thermal
+ - temperature
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors thermal zone statistics on Windows systems.
+ method_description: |
+ It queries the 'Thermal Zone Information' object from Perflib in order to gather the metrics.
+ supported_platforms:
+ include: ["windows"]
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ The collector automatically detects all of the metrics; no further configuration is required.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ section_name: "[plugin:windows]"
+ name: "netdata.conf"
+ description: "The Netdata main configuration file."
+ options:
+ description: ""
+ folding:
+ title: "Config Option"
+ enabled: false
+ list:
+ - name: PerflibThermalZone
+ description: An option to enable or disable the data collection.
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: Thermal zone
+ description: "These metrics refer to a Thermal zone"
+ labels: []
+ metrics:
+ - name: system.thermalzone_temperature
+ description: Thermal zone temperature
+ unit: celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
diff --git a/src/collectors/windows.plugin/metdata.yaml b/src/collectors/windows.plugin/metdata.yaml
deleted file mode 100644
index 090a48db5..000000000
--- a/src/collectors/windows.plugin/metdata.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-plugin_name: windows.plugin
-modules:
- - meta:
- plugin_name: proc.plugin
- module_name: PerflibProcesses
- monitored_instance:
- name: System statistics
- link: ""
- categories:
- - data-collection.windows-systems
- icon_filename: "windows.svg"
- related_resources:
- integrations:
- list: [ ]
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - process counts
- - threads
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Perflib provides different statistical methods about Microsoft Windows environment. This collector query for
- Object 'System' to show actual number of processes, threads and context switches.
- method_description: ""
- supported_platforms:
- include: [ "windows" ]
- exclude: [ ]
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: |
- The collector auto-detects all metrics. No configuration is needed.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: [ ]
- configuration:
- file:
- section_name: ""
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: [ ]
- examples:
- folding:
- enabled: true
- title: ""
- list: [ ]
- troubleshooting:
- problems:
- list: [ ]
- alerts:
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: [ ]
- scopes:
- - name: global
- description: ""
- labels: [ ]
- metrics:
- - name: system.processes
- description: System Processes
- unit: "processes"
- chart_type: line
- dimensions:
- - name: running
- - name: system.threads
- description: System Threads
- unit: "threads"
- chart_type: line
- dimensions:
- - name: threads
- - name: system.ctxt
- description: CPU Context Switches
- unit: "context switches/s"
- chart_type: line
- dimensions:
- - name: switches \ No newline at end of file
diff --git a/src/collectors/windows.plugin/perflib-dump.c b/src/collectors/windows.plugin/perflib-dump.c
deleted file mode 100644
index e01813a49..000000000
--- a/src/collectors/windows.plugin/perflib-dump.c
+++ /dev/null
@@ -1,529 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "perflib.h"
-#include "windows-internals.h"
-
-static const char *getCounterType(DWORD CounterType) {
- switch (CounterType) {
- case PERF_COUNTER_COUNTER:
- return "PERF_COUNTER_COUNTER";
-
- case PERF_COUNTER_TIMER:
- return "PERF_COUNTER_TIMER";
-
- case PERF_COUNTER_QUEUELEN_TYPE:
- return "PERF_COUNTER_QUEUELEN_TYPE";
-
- case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
- return "PERF_COUNTER_LARGE_QUEUELEN_TYPE";
-
- case PERF_COUNTER_100NS_QUEUELEN_TYPE:
- return "PERF_COUNTER_100NS_QUEUELEN_TYPE";
-
- case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
- return "PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE";
-
- case PERF_COUNTER_BULK_COUNT:
- return "PERF_COUNTER_BULK_COUNT";
-
- case PERF_COUNTER_TEXT:
- return "PERF_COUNTER_TEXT";
-
- case PERF_COUNTER_RAWCOUNT:
- return "PERF_COUNTER_RAWCOUNT";
-
- case PERF_COUNTER_LARGE_RAWCOUNT:
- return "PERF_COUNTER_LARGE_RAWCOUNT";
-
- case PERF_COUNTER_RAWCOUNT_HEX:
- return "PERF_COUNTER_RAWCOUNT_HEX";
-
- case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
- return "PERF_COUNTER_LARGE_RAWCOUNT_HEX";
-
- case PERF_SAMPLE_FRACTION:
- return "PERF_SAMPLE_FRACTION";
-
- case PERF_SAMPLE_COUNTER:
- return "PERF_SAMPLE_COUNTER";
-
- case PERF_COUNTER_NODATA:
- return "PERF_COUNTER_NODATA";
-
- case PERF_COUNTER_TIMER_INV:
- return "PERF_COUNTER_TIMER_INV";
-
- case PERF_SAMPLE_BASE:
- return "PERF_SAMPLE_BASE";
-
- case PERF_AVERAGE_TIMER:
- return "PERF_AVERAGE_TIMER";
-
- case PERF_AVERAGE_BASE:
- return "PERF_AVERAGE_BASE";
-
- case PERF_AVERAGE_BULK:
- return "PERF_AVERAGE_BULK";
-
- case PERF_OBJ_TIME_TIMER:
- return "PERF_OBJ_TIME_TIMER";
-
- case PERF_100NSEC_TIMER:
- return "PERF_100NSEC_TIMER";
-
- case PERF_100NSEC_TIMER_INV:
- return "PERF_100NSEC_TIMER_INV";
-
- case PERF_COUNTER_MULTI_TIMER:
- return "PERF_COUNTER_MULTI_TIMER";
-
- case PERF_COUNTER_MULTI_TIMER_INV:
- return "PERF_COUNTER_MULTI_TIMER_INV";
-
- case PERF_COUNTER_MULTI_BASE:
- return "PERF_COUNTER_MULTI_BASE";
-
- case PERF_100NSEC_MULTI_TIMER:
- return "PERF_100NSEC_MULTI_TIMER";
-
- case PERF_100NSEC_MULTI_TIMER_INV:
- return "PERF_100NSEC_MULTI_TIMER_INV";
-
- case PERF_RAW_FRACTION:
- return "PERF_RAW_FRACTION";
-
- case PERF_LARGE_RAW_FRACTION:
- return "PERF_LARGE_RAW_FRACTION";
-
- case PERF_RAW_BASE:
- return "PERF_RAW_BASE";
-
- case PERF_LARGE_RAW_BASE:
- return "PERF_LARGE_RAW_BASE";
-
- case PERF_ELAPSED_TIME:
- return "PERF_ELAPSED_TIME";
-
- case PERF_COUNTER_HISTOGRAM_TYPE:
- return "PERF_COUNTER_HISTOGRAM_TYPE";
-
- case PERF_COUNTER_DELTA:
- return "PERF_COUNTER_DELTA";
-
- case PERF_COUNTER_LARGE_DELTA:
- return "PERF_COUNTER_LARGE_DELTA";
-
- case PERF_PRECISION_SYSTEM_TIMER:
- return "PERF_PRECISION_SYSTEM_TIMER";
-
- case PERF_PRECISION_100NS_TIMER:
- return "PERF_PRECISION_100NS_TIMER";
-
- case PERF_PRECISION_OBJECT_TIMER:
- return "PERF_PRECISION_OBJECT_TIMER";
-
- default:
- return "UNKNOWN_COUNTER_TYPE";
- }
-}
-
-static const char *getCounterDescription(DWORD CounterType) {
- switch (CounterType) {
- case PERF_COUNTER_COUNTER:
- return "32-bit Counter. Divide delta by delta time. Display suffix: \"/sec\"";
-
- case PERF_COUNTER_TIMER:
- return "64-bit Timer. Divide delta by delta time. Display suffix: \"%\"";
-
- case PERF_COUNTER_QUEUELEN_TYPE:
- case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
- return "Queue Length Space-Time Product. Divide delta by delta time. No Display Suffix";
-
- case PERF_COUNTER_100NS_QUEUELEN_TYPE:
- return "Queue Length Space-Time Product using 100 Ns timebase. Divide delta by delta time. No Display Suffix";
-
- case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
- return "Queue Length Space-Time Product using Object specific timebase. Divide delta by delta time. No Display Suffix.";
-
- case PERF_COUNTER_BULK_COUNT:
- return "64-bit Counter. Divide delta by delta time. Display Suffix: \"/sec\"";
-
- case PERF_COUNTER_TEXT:
- return "Unicode text Display as text.";
-
- case PERF_COUNTER_RAWCOUNT:
- case PERF_COUNTER_LARGE_RAWCOUNT:
- return "A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix.";
-
- case PERF_COUNTER_RAWCOUNT_HEX:
- case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
- return "Special case for RAWCOUNT which should be displayed in hex. A counter which should not be time averaged on display (such as an error counter on a serial line). Display as is. No Display Suffix.";
-
- case PERF_SAMPLE_FRACTION:
- return "A count which is either 1 or 0 on each sampling interrupt (% busy). Divide delta by delta base. Display Suffix: \"%\"";
-
- case PERF_SAMPLE_COUNTER:
- return "A count which is sampled on each sampling interrupt (queue length). Divide delta by delta time. No Display Suffix.";
-
- case PERF_COUNTER_NODATA:
- return "A label: no data is associated with this counter (it has 0 length). Do not display.";
-
- case PERF_COUNTER_TIMER_INV:
- return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 - delta divided by delta time. Display suffix: \"%\"";
-
- case PERF_SAMPLE_BASE:
- return "The divisor for a sample, used with the previous counter to form a sampled %. You must check for >0 before dividing by this! This counter will directly follow the numerator counter. It should not be displayed to the user.";
-
- case PERF_AVERAGE_TIMER:
- return "A timer which, when divided by an average base, produces a time in seconds which is the average time of some operation. This timer times total operations, and the base is the number of operations. Display Suffix: \"sec\"";
-
- case PERF_AVERAGE_BASE:
- return "Used as the denominator in the computation of time or count averages. Must directly follow the numerator counter. Not displayed to the user.";
-
- case PERF_AVERAGE_BULK:
- return "A bulk count which, when divided (typically) by the number of operations, gives (typically) the number of bytes per operation. No Display Suffix.";
-
- case PERF_OBJ_TIME_TIMER:
- return "64-bit Timer in object specific units. Display delta divided by delta time as returned in the object type header structure. Display suffix: \"%\"";
-
- case PERF_100NSEC_TIMER:
- return "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\"";
-
- case PERF_100NSEC_TIMER_INV:
- return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 - delta divided by delta time. Display suffix: \"%\"";
-
- case PERF_COUNTER_MULTI_TIMER:
- return "64-bit Timer. Divide delta by delta time. Display suffix: \"%\". Timer for multiple instances, so result can exceed 100%.";
-
- case PERF_COUNTER_MULTI_TIMER_INV:
- return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 * _MULTI_BASE - delta divided by delta time. Display suffix: \"%\" Timer for multiple instances, so result can exceed 100%. Followed by a counter of type _MULTI_BASE.";
-
- case PERF_COUNTER_MULTI_BASE:
- return "Number of instances to which the preceding _MULTI_..._INV counter applies. Used as a factor to get the percentage.";
-
- case PERF_100NSEC_MULTI_TIMER:
- return "64-bit Timer in 100 nsec units. Display delta divided by delta time. Display suffix: \"%\" Timer for multiple instances, so result can exceed 100%.";
-
- case PERF_100NSEC_MULTI_TIMER_INV:
- return "64-bit Timer inverse (e.g., idle is measured, but display busy %). Display 100 * _MULTI_BASE - delta divided by delta time. Display suffix: \"%\" Timer for multiple instances, so result can exceed 100%. Followed by a counter of type _MULTI_BASE.";
-
- case PERF_LARGE_RAW_FRACTION:
- case PERF_RAW_FRACTION:
- return "Indicates the data is a fraction of the following counter which should not be time averaged on display (such as free space over total space.) Display as is. Display the quotient as \"%\"";
-
- case PERF_RAW_BASE:
- case PERF_LARGE_RAW_BASE:
- return "Indicates the data is a base for the preceding counter which should not be time averaged on display (such as free space over total space.)";
-
- case PERF_ELAPSED_TIME:
- return "The data collected in this counter is actually the start time of the item being measured. For display, this data is subtracted from the sample time to yield the elapsed time as the difference between the two. In the definition below, the PerfTime field of the Object contains the sample time as indicated by the PERF_OBJECT_TIMER bit and the difference is scaled by the PerfFreq of the Object to convert the time units into seconds.";
-
- case PERF_COUNTER_HISTOGRAM_TYPE:
- return "Counter type can be used with the preceding types to define a range of values to be displayed in a histogram.";
-
- case PERF_COUNTER_DELTA:
- case PERF_COUNTER_LARGE_DELTA:
- return "This counter is used to display the difference from one sample to the next. The counter value is a constantly increasing number and the value displayed is the difference between the current value and the previous value. Negative numbers are not allowed which shouldn't be a problem as long as the counter value is increasing or unchanged.";
-
- case PERF_PRECISION_SYSTEM_TIMER:
- return "The precision counters are timers that consist of two counter values:\r\n\t1) the count of elapsed time of the event being monitored\r\n\t2) the \"clock\" time in the same units\r\nthe precision timers are used where the standard system timers are not precise enough for accurate readings. It's assumed that the service providing the data is also providing a timestamp at the same time which will eliminate any error that may occur since some small and variable time elapses between the time the system timestamp is captured and when the data is collected from the performance DLL. Only in extreme cases has this been observed to be problematic.\r\nwhen using this type of timer, the definition of the PERF_PRECISION_TIMESTAMP counter must immediately follow the definition of the PERF_PRECISION_*_TIMER in the Object header\r\nThe timer used has the same frequency as the System Performance Timer";
-
- case PERF_PRECISION_100NS_TIMER:
- return "The precision counters are timers that consist of two counter values:\r\n\t1) the count of elapsed time of the event being monitored\r\n\t2) the \"clock\" time in the same units\r\nthe precision timers are used where the standard system timers are not precise enough for accurate readings. It's assumed that the service providing the data is also providing a timestamp at the same time which will eliminate any error that may occur since some small and variable time elapses between the time the system timestamp is captured and when the data is collected from the performance DLL. Only in extreme cases has this been observed to be problematic.\r\nwhen using this type of timer, the definition of the PERF_PRECISION_TIMESTAMP counter must immediately follow the definition of the PERF_PRECISION_*_TIMER in the Object header\r\nThe timer used has the same frequency as the 100 NanoSecond Timer";
-
- case PERF_PRECISION_OBJECT_TIMER:
- return "The precision counters are timers that consist of two counter values:\r\n\t1) the count of elapsed time of the event being monitored\r\n\t2) the \"clock\" time in the same units\r\nthe precision timers are used where the standard system timers are not precise enough for accurate readings. It's assumed that the service providing the data is also providing a timestamp at the same time which will eliminate any error that may occur since some small and variable time elapses between the time the system timestamp is captured and when the data is collected from the performance DLL. Only in extreme cases has this been observed to be problematic.\r\nwhen using this type of timer, the definition of the PERF_PRECISION_TIMESTAMP counter must immediately follow the definition of the PERF_PRECISION_*_TIMER in the Object header\r\nThe timer used is of the frequency specified in the Object header's. PerfFreq field (PerfTime is ignored)";
-
- default:
- return "";
- }
-}
-
-static const char *getCounterAlgorithm(DWORD CounterType) {
- switch (CounterType)
- {
- case PERF_COUNTER_COUNTER:
- case PERF_SAMPLE_COUNTER:
- case PERF_COUNTER_BULK_COUNT:
- return "(data1 - data0) / ((time1 - time0) / frequency)";
-
- case PERF_COUNTER_QUEUELEN_TYPE:
- case PERF_COUNTER_100NS_QUEUELEN_TYPE:
- case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
- case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
- case PERF_AVERAGE_BULK: // normally not displayed
- return "(data1 - data0) / (time1 - time0)";
-
- case PERF_OBJ_TIME_TIMER:
- case PERF_COUNTER_TIMER:
- case PERF_100NSEC_TIMER:
- case PERF_PRECISION_SYSTEM_TIMER:
- case PERF_PRECISION_100NS_TIMER:
- case PERF_PRECISION_OBJECT_TIMER:
- case PERF_SAMPLE_FRACTION:
- return "100 * (data1 - data0) / (time1 - time0)";
-
- case PERF_COUNTER_TIMER_INV:
- return "100 * (1 - ((data1 - data0) / (time1 - time0)))";
-
- case PERF_100NSEC_TIMER_INV:
- return "100 * (1- (data1 - data0) / (time1 - time0))";
-
- case PERF_COUNTER_MULTI_TIMER:
- return "100 * ((data1 - data0) / ((time1 - time0) / frequency1)) / multi1";
-
- case PERF_100NSEC_MULTI_TIMER:
- return "100 * ((data1 - data0) / (time1 - time0)) / multi1";
-
- case PERF_COUNTER_MULTI_TIMER_INV:
- case PERF_100NSEC_MULTI_TIMER_INV:
- return "100 * (multi1 - ((data1 - data0) / (time1 - time0)))";
-
- case PERF_COUNTER_RAWCOUNT:
- case PERF_COUNTER_LARGE_RAWCOUNT:
- return "data0";
-
- case PERF_COUNTER_RAWCOUNT_HEX:
- case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
- return "hex(data0)";
-
- case PERF_COUNTER_DELTA:
- case PERF_COUNTER_LARGE_DELTA:
- return "data1 - data0";
-
- case PERF_RAW_FRACTION:
- case PERF_LARGE_RAW_FRACTION:
- return "100 * data0 / time0";
-
- case PERF_AVERAGE_TIMER:
- return "((data1 - data0) / frequency1) / (time1 - time0)";
-
- case PERF_ELAPSED_TIME:
- return "(time0 - data0) / frequency0";
-
- case PERF_COUNTER_TEXT:
- case PERF_SAMPLE_BASE:
- case PERF_AVERAGE_BASE:
- case PERF_COUNTER_MULTI_BASE:
- case PERF_RAW_BASE:
- case PERF_COUNTER_NODATA:
- case PERF_PRECISION_TIMESTAMP:
- default:
- return "";
- }
-}
-
-void dumpSystemTime(BUFFER *wb, SYSTEMTIME *st) {
- buffer_json_member_add_uint64(wb, "Year", st->wYear);
- buffer_json_member_add_uint64(wb, "Month", st->wMonth);
- buffer_json_member_add_uint64(wb, "DayOfWeek", st->wDayOfWeek);
- buffer_json_member_add_uint64(wb, "Day", st->wDay);
- buffer_json_member_add_uint64(wb, "Hour", st->wHour);
- buffer_json_member_add_uint64(wb, "Minute", st->wMinute);
- buffer_json_member_add_uint64(wb, "Second", st->wSecond);
- buffer_json_member_add_uint64(wb, "Milliseconds", st->wMilliseconds);
-}
-
-bool dumpDataCb(PERF_DATA_BLOCK *pDataBlock, void *data) {
- char name[4096];
- if(!getSystemName(pDataBlock, name, sizeof(name)))
- strncpyz(name, "[failed]", sizeof(name) - 1);
-
- BUFFER *wb = data;
- buffer_json_member_add_string(wb, "SystemName", name);
-
- // Number of types of objects being reported
- // Type: DWORD
- buffer_json_member_add_int64(wb, "NumObjectTypes", pDataBlock->NumObjectTypes);
-
- buffer_json_member_add_int64(wb, "LittleEndian", pDataBlock->LittleEndian);
-
- // Version and Revision of these data structures.
- // Version starts at 1.
- // Revision starts at 0 for each Version.
- // Type: DWORD
- buffer_json_member_add_int64(wb, "Version", pDataBlock->Version);
- buffer_json_member_add_int64(wb, "Revision", pDataBlock->Revision);
-
- // Object Title Index of default object to display when data from this system is retrieved
- // (-1 = none, but this is not expected to be used)
- // Type: LONG
- buffer_json_member_add_int64(wb, "DefaultObject", pDataBlock->DefaultObject);
-
- // Performance counter frequency at the system under measurement
- // Type: LARGE_INTEGER
- buffer_json_member_add_int64(wb, "PerfFreq", pDataBlock->PerfFreq.QuadPart);
-
- // Performance counter value at the system under measurement
- // Type: LARGE_INTEGER
- buffer_json_member_add_int64(wb, "PerfTime", pDataBlock->PerfTime.QuadPart);
-
- // Performance counter time in 100 nsec units at the system under measurement
- // Type: LARGE_INTEGER
- buffer_json_member_add_int64(wb, "PerfTime100nSec", pDataBlock->PerfTime100nSec.QuadPart);
-
- // Time at the system under measurement in UTC
- // Type: SYSTEMTIME
- buffer_json_member_add_object(wb, "SystemTime");
- dumpSystemTime(wb, &pDataBlock->SystemTime);
- buffer_json_object_close(wb);
-
- if(pDataBlock->NumObjectTypes)
- buffer_json_member_add_array(wb, "Objects");
-
- return true;
-}
-
-static const char *GetDetailLevel(DWORD num) {
- switch (num) {
- case 100:
- return "Novice (100)";
- case 200:
- return "Advanced (200)";
- case 300:
- return "Expert (300)";
- case 400:
- return "Wizard (400)";
-
- default:
- return "Unknown";
- }
-}
-
-bool dumpObjectCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, void *data) {
- (void)pDataBlock;
- BUFFER *wb = data;
- if(!pObjectType) {
- buffer_json_array_close(wb); // instances or counters
- buffer_json_object_close(wb); // objectType
- return true;
- }
-
- buffer_json_add_array_item_object(wb); // objectType
- buffer_json_member_add_int64(wb, "NameId", pObjectType->ObjectNameTitleIndex);
- buffer_json_member_add_string(wb, "Name", RegistryFindNameByID(pObjectType->ObjectNameTitleIndex));
- buffer_json_member_add_int64(wb, "HelpId", pObjectType->ObjectHelpTitleIndex);
- buffer_json_member_add_string(wb, "Help", RegistryFindHelpByID(pObjectType->ObjectHelpTitleIndex));
- buffer_json_member_add_int64(wb, "NumInstances", pObjectType->NumInstances);
- buffer_json_member_add_int64(wb, "NumCounters", pObjectType->NumCounters);
- buffer_json_member_add_int64(wb, "PerfTime", pObjectType->PerfTime.QuadPart);
- buffer_json_member_add_int64(wb, "PerfFreq", pObjectType->PerfFreq.QuadPart);
- buffer_json_member_add_int64(wb, "CodePage", pObjectType->CodePage);
- buffer_json_member_add_int64(wb, "DefaultCounter", pObjectType->DefaultCounter);
- buffer_json_member_add_string(wb, "DetailLevel", GetDetailLevel(pObjectType->DetailLevel));
-
- if(ObjectTypeHasInstances(pDataBlock, pObjectType))
- buffer_json_member_add_array(wb, "Instances");
- else
- buffer_json_member_add_array(wb, "Counters");
-
- return true;
-}
-
-bool dumpInstanceCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, void *data) {
- (void)pDataBlock;
- BUFFER *wb = data;
- if(!pInstance) {
- buffer_json_array_close(wb); // counters
- buffer_json_object_close(wb); // instance
- return true;
- }
-
- char name[4096];
- if(!getInstanceName(pDataBlock, pObjectType, pInstance, name, sizeof(name)))
- strncpyz(name, "[failed]", sizeof(name) - 1);
-
- buffer_json_add_array_item_object(wb);
- buffer_json_member_add_string(wb, "Instance", name);
- buffer_json_member_add_int64(wb, "UniqueID", pInstance->UniqueID);
- buffer_json_member_add_array(wb, "Labels");
- {
- buffer_json_add_array_item_object(wb);
- {
- buffer_json_member_add_string(wb, "key", RegistryFindNameByID(pObjectType->ObjectNameTitleIndex));
- buffer_json_member_add_string(wb, "value", name);
- }
- buffer_json_object_close(wb);
-
- if(pInstance->ParentObjectTitleIndex) {
- PERF_INSTANCE_DEFINITION *pi = pInstance;
- while(pi->ParentObjectTitleIndex) {
- PERF_OBJECT_TYPE *po = getObjectTypeByIndex(pDataBlock, pInstance->ParentObjectTitleIndex);
- pi = getInstanceByPosition(pDataBlock, po, pi->ParentObjectInstance);
-
- if(!getInstanceName(pDataBlock, po, pi, name, sizeof(name)))
- strncpyz(name, "[failed]", sizeof(name) - 1);
-
- buffer_json_add_array_item_object(wb);
- {
- buffer_json_member_add_string(wb, "key", RegistryFindNameByID(po->ObjectNameTitleIndex));
- buffer_json_member_add_string(wb, "value", name);
- }
- buffer_json_object_close(wb);
- }
- }
- }
- buffer_json_array_close(wb); // rrdlabels
-
- buffer_json_member_add_array(wb, "Counters");
- return true;
-}
-
-void dumpSample(BUFFER *wb, RAW_DATA *d) {
- buffer_json_member_add_object(wb, "Value");
- buffer_json_member_add_uint64(wb, "data", d->Data);
- buffer_json_member_add_int64(wb, "time", d->Time);
- buffer_json_member_add_uint64(wb, "type", d->CounterType);
- buffer_json_member_add_int64(wb, "multi", d->MultiCounterData);
- buffer_json_member_add_int64(wb, "frequency", d->Frequency);
- buffer_json_object_close(wb);
-}
-
-bool dumpCounterCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data) {
- (void)pDataBlock;
- (void)pObjectType;
- BUFFER *wb = data;
- buffer_json_add_array_item_object(wb);
- buffer_json_member_add_string(wb, "Counter", RegistryFindNameByID(pCounter->CounterNameTitleIndex));
- dumpSample(wb, sample);
- buffer_json_member_add_string(wb, "Help", RegistryFindHelpByID(pCounter->CounterHelpTitleIndex));
- buffer_json_member_add_string(wb, "Type", getCounterType(pCounter->CounterType));
- buffer_json_member_add_string(wb, "Algorithm", getCounterAlgorithm(pCounter->CounterType));
- buffer_json_member_add_string(wb, "Description", getCounterDescription(pCounter->CounterType));
- buffer_json_object_close(wb);
- return true;
-}
-
-bool dumpInstanceCounterCb(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data) {
- (void)pInstance;
- return dumpCounterCb(pDataBlock, pObjectType, pCounter, sample, data);
-}
-
-
-int windows_perflib_dump(const char *key) {
- if(key && !*key)
- key = NULL;
-
- PerflibNamesRegistryInitialize();
-
- DWORD id = 0;
- if(key) {
- id = RegistryFindIDByName(key);
- if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND) {
- fprintf(stderr, "Cannot find key '%s' in Windows Performance Counters Registry.\n", key);
- exit(1);
- }
- }
-
- CLEAN_BUFFER *wb = buffer_create(0, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
-
- perflibQueryAndTraverse(id, dumpDataCb, dumpObjectCb, dumpInstanceCb, dumpInstanceCounterCb, dumpCounterCb, wb);
-
- buffer_json_finalize(wb);
- printf("\n%s\n", buffer_tostring(wb));
-
- perflibFreePerformanceData();
-
- return 0;
-}
diff --git a/src/collectors/windows.plugin/perflib-hyperv.c b/src/collectors/windows.plugin/perflib-hyperv.c
new file mode 100644
index 000000000..523361995
--- /dev/null
+++ b/src/collectors/windows.plugin/perflib-hyperv.c
@@ -0,0 +1,1793 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+#define _COMMON_PLUGIN_NAME "windows.plugin"
+#define _COMMON_PLUGIN_MODULE_NAME "PerflibHyperV"
+#include "../common-contexts/common-contexts.h"
+
+#define HYPERV "hyperv"
+
+static void get_and_sanitize_instance_value(
+ PERF_DATA_BLOCK *pDataBlock,
+ PERF_OBJECT_TYPE *pObjectType,
+ PERF_INSTANCE_DEFINITION *pi,
+ char *buffer,
+ size_t buffer_size)
+{
+ // char wstr[8192];
+ if (!getInstanceName(pDataBlock, pObjectType, pi, buffer, buffer_size)) {
+ strncpyz(buffer, "[unknown]", buffer_size - 1);
+ // return;
+ }
+ // rrdlabels_sanitize_value(buffer, wstr, buffer_size);
+}
+
+#define DICT_PERF_OPTION (DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE)
+
+#define DEFINE_RD(counter_name) RRDDIM *rd_##counter_name
+
+#define GET_INSTANCE_COUNTER(counter) \
+ do { \
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->counter); \
+ } while (0)
+
+#define GET_OBJECT_COUNTER(counter) \
+ do { \
+ perflibGetObjectCounter(pDataBlock, pObjectType, &p->counter); \
+ } while (0)
+
+#define SETP_DIM_VALUE(st, field) \
+ do { \
+ rrddim_set_by_pointer(p->st, p->rd_##field, (collected_number)p->field.current.Data); \
+ } while (0)
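+
+// Usage sketch for the helpers above: a collector struct declares a COUNTER_DATA
+// member and an RRDDIM pointer with the same name via DEFINE_RD(). Given
+// DEFINE_RD(CurrentPressure) and COUNTER_DATA CurrentPressure, the call
+//
+//     SETP_DIM_VALUE(st_pressure, CurrentPressure);
+//
+// expands to
+//
+//     rrddim_set_by_pointer(p->st_pressure, p->rd_CurrentPressure,
+//                           (collected_number)p->CurrentPressure.current.Data);
+//
+// pushing the most recent raw sample of the counter into its matching dimension.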
+
+typedef bool (*perf_func_collect)(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data);
+
+typedef struct {
+ const char *registry_name;
+ perf_func_collect function_collect;
+ dict_cb_insert_t dict_insert_cb;
+ size_t dict_size;
+ DICTIONARY *instance;
+} hyperv_perf_item;
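+
+// Each Hyper-V performance object handled by this file is described by one
+// hyperv_perf_item: the perflib object name to look up, the collect function to
+// run on every iteration, and (for per-instance objects) a dictionary plus its
+// insert callback and entry size that keep one state struct per instance.
+// A hypothetical entry for the dynamic-memory collector could look like this
+// (the object name here is an assumption; the real table is expected further
+// down in this file):
+//
+//     { .registry_name    = "Hyper-V Dynamic Memory VM",
+//       .function_collect = do_hyperv_memory,
+//       .dict_insert_cb   = dict_hyperv_memory_insert_cb,
+//       .dict_size        = sizeof(struct hypervisor_memory) },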
+
+struct hypervisor_memory {
+ bool collected_metadata;
+ bool charts_created;
+
+ RRDSET *st_pressure;
+ RRDSET *st_vm_memory_physical;
+ RRDSET *st_vm_memory_physical_guest_visible;
+
+ DEFINE_RD(CurrentPressure);
+ DEFINE_RD(PhysicalMemory);
+ DEFINE_RD(GuestVisiblePhysicalMemory);
+ DEFINE_RD(GuestAvailableMemory);
+
+ COUNTER_DATA CurrentPressure;
+ COUNTER_DATA PhysicalMemory;
+ COUNTER_DATA GuestVisiblePhysicalMemory;
+ COUNTER_DATA GuestAvailableMemory;
+};
+
+void initialize_hyperv_memory_keys(struct hypervisor_memory *p) {
+ p->CurrentPressure.key = "Current Pressure";
+ p->PhysicalMemory.key = "Physical Memory";
+ p->GuestVisiblePhysicalMemory.key = "Guest Visible Physical Memory";
+ p->GuestAvailableMemory.key = "Guest Available Memory";
+}
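+
+// The .key strings are matched against the counter names Windows publishes for
+// the corresponding performance object, so they must be spelled exactly as the
+// OS reports them, spaces and capitalization included.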
+
+
+void dict_hyperv_memory_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct hypervisor_memory *p = value;
+ initialize_hyperv_memory_keys(p);
+}
+
+struct hypervisor_partition {
+ bool collected_metadata;
+ bool charts_created;
+
+ RRDSET *st_vm_vid_physical_pages_allocated;
+ RRDSET *st_vm_vid_remote_physical_pages;
+
+ DEFINE_RD(PhysicalPagesAllocated);
+ DEFINE_RD(RemotePhysicalPages);
+
+ COUNTER_DATA PhysicalPagesAllocated;
+ COUNTER_DATA RemotePhysicalPages;
+
+};
+
+void initialize_hyperv_partition_keys(struct hypervisor_partition *p)
+{
+ p->PhysicalPagesAllocated.key = "Physical Pages Allocated";
+ p->RemotePhysicalPages.key = "Remote Physical Pages";
+}
+
+void dict_hyperv_partition_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct hypervisor_partition *p = value;
+ initialize_hyperv_partition_keys(p);
+}
+
+static bool do_hyperv_memory(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data)
+{
+ hyperv_perf_item *item = data;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name);
+ if (!pObjectType)
+ return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for(LONG i = 0; i < pObjectType->NumInstances ; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer));
+
+ struct hypervisor_memory *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p));
+
+ if(!p->collected_metadata) {
+ p->collected_metadata = true;
+ }
+
+ GET_INSTANCE_COUNTER(CurrentPressure);
+ GET_INSTANCE_COUNTER(PhysicalMemory);
+ GET_INSTANCE_COUNTER(GuestVisiblePhysicalMemory);
+ GET_INSTANCE_COUNTER(GuestAvailableMemory);
+
+ if (!p->charts_created) {
+ p->charts_created = true;
+ if(!p->st_vm_memory_physical) {
+ p->st_vm_memory_physical = rrdset_create_localhost(
+ "vm_memory_physical",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV".vm_memory_physical",
+ "VM assigned memory",
+ "bytes",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PHYSICAL,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->st_vm_memory_physical_guest_visible = rrdset_create_localhost(
+ "vm_memory_physical_guest_visible",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV".vm_memory_physical_guest_visible",
+ "VM guest visible memory",
+ "bytes",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PHYSICAL_GUEST_VISIBLE,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->st_pressure = rrdset_create_localhost(
+ "vm_memory_pressure_current",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV".vm_memory_pressure_current",
+ "VM Memory Pressure",
+ "percentage",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_MEMORY_PRESSURE_CURRENT,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_CurrentPressure = rrddim_add(p->st_pressure, "pressure", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_PhysicalMemory = rrddim_add(p->st_vm_memory_physical, "assigned", NULL, 1024 * 1024, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_GuestVisiblePhysicalMemory = rrddim_add(p->st_vm_memory_physical_guest_visible, "visible", NULL, 1024 * 1024, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_GuestAvailableMemory = rrddim_add(p->st_vm_memory_physical_guest_visible, "available", NULL, 1024 * 1024, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_vm_memory_physical->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(p->st_pressure->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(p->st_vm_memory_physical_guest_visible->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+ }
+
+ SETP_DIM_VALUE(st_pressure, CurrentPressure);
+ SETP_DIM_VALUE(st_vm_memory_physical, PhysicalMemory);
+ SETP_DIM_VALUE(st_vm_memory_physical_guest_visible, GuestVisiblePhysicalMemory);
+ SETP_DIM_VALUE(st_vm_memory_physical_guest_visible, GuestAvailableMemory);
+
+ rrdset_done(p->st_pressure);
+ rrdset_done(p->st_vm_memory_physical);
+ rrdset_done(p->st_vm_memory_physical_guest_visible);
+ }
+
+ return true;
+}
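+
+// The per-instance collectors in this file all follow the loop above: walk the
+// object's instances, key a state struct in item->instance by the (sanitized)
+// instance name, create the charts lazily the first time an instance is seen,
+// then push the raw counter samples and call rrdset_done(). The 1024 * 1024
+// multipliers on the memory dimensions convert the counter values - assumed to
+// be reported in MiB - into the bytes unit the charts declare.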
+
+static bool do_hyperv_vid_partition(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data)
+{
+ hyperv_perf_item *item = data;
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name);
+ if (!pObjectType)
+ return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for(LONG i = 0; i < pObjectType->NumInstances ; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer));
+
+ struct hypervisor_partition *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p));
+
+ if(!p->collected_metadata) {
+
+ p->collected_metadata = true;
+ }
+
+ if(strcasecmp(windows_shared_buffer, "_Total") == 0)
+ continue;
+
+ GET_INSTANCE_COUNTER(RemotePhysicalPages);
+ GET_INSTANCE_COUNTER(PhysicalPagesAllocated);
+
+ if (!p->charts_created) {
+ p->charts_created = true;
+
+ p->st_vm_vid_physical_pages_allocated = rrdset_create_localhost(
+ "vm_vid_physical_pages_allocated",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_vid_physical_pages_allocated",
+ "VM physical pages allocated",
+ "pages",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_VID_PHYSICAL_PAGES_ALLOCATED,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->st_vm_vid_remote_physical_pages = rrdset_create_localhost(
+ "vm_vid_remote_physical_pages",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_vid_remote_physical_pages",
+ "VM physical pages not allocated from the preferred NUMA node",
+ "pages",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_VID_REMOTE_PHYSICAL_PAGES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_PhysicalPagesAllocated = rrddim_add(p->st_vm_vid_physical_pages_allocated, "allocated", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_RemotePhysicalPages = rrddim_add(p->st_vm_vid_remote_physical_pages, "remote_physical", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_vm_vid_physical_pages_allocated->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(p->st_vm_vid_remote_physical_pages->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ SETP_DIM_VALUE(st_vm_vid_remote_physical_pages, RemotePhysicalPages);
+ SETP_DIM_VALUE(st_vm_vid_physical_pages_allocated, PhysicalPagesAllocated);
+
+ rrdset_done(p->st_vm_vid_physical_pages_allocated);
+ rrdset_done(p->st_vm_vid_remote_physical_pages);
+ }
+
+ return true;
+}
+
+// Define structure for Hyper-V Virtual Machine Health Summary
+static struct hypervisor_health_summary {
+ bool collected_metadata;
+ bool charts_created;
+
+ RRDSET *st_health;
+
+ DEFINE_RD(HealthCritical);
+ DEFINE_RD(HealthOk);
+
+ COUNTER_DATA HealthCritical;
+ COUNTER_DATA HealthOk;
+} health_summary = {
+ .collected_metadata = false,
+ .st_health = NULL,
+ .HealthCritical.key = "Health Critical",
+ .HealthOk.key = "Health Ok"};
+
+// Function to handle "Hyper-V Virtual Machine Health Summary"
+static bool do_hyperv_health_summary(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data)
+{
+ hyperv_perf_item *item = data;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name);
+ if (!pObjectType)
+ return false;
+
+ struct hypervisor_health_summary *p = &health_summary;
+
+ GET_OBJECT_COUNTER(HealthCritical);
+ GET_OBJECT_COUNTER(HealthOk);
+
+ if (!p->charts_created) {
+ p->charts_created = true;
+ p->st_health = rrdset_create_localhost(
+ "vms_health",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vms_health",
+ "Virtual machines health status",
+ "vms",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VMS_HEALTH,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ p->rd_HealthCritical = rrddim_add(p->st_health, "critical", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_HealthOk = rrddim_add(p->st_health, "ok", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ SETP_DIM_VALUE(st_health, HealthCritical);
+ SETP_DIM_VALUE(st_health, HealthOk);
+
+ rrdset_done(p->st_health);
+ return true;
+}
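+
+// Unlike the per-VM collectors, the health summary reads object-level counters
+// via GET_OBJECT_COUNTER, so a single static struct is enough here. Both values
+// are gauges (the current number of VMs in each state), hence the dimensions
+// use RRD_ALGORITHM_ABSOLUTE instead of a rate.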
+
+// Define structure for Hyper-V Root Partition Metrics (Device and GPA Space Pages)
+struct hypervisor_root_partition {
+ bool collected_metadata;
+ bool charts_created;
+
+ RRDSET *st_device_space_pages;
+ RRDSET *st_gpa_space_pages;
+ RRDSET *st_gpa_space_modifications;
+ RRDSET *st_attached_devices;
+ RRDSET *st_deposited_pages;
+
+ RRDSET *st_DeviceDMAErrors;
+ RRDSET *st_DeviceInterruptErrors;
+ RRDSET *st_DeviceInterruptThrottleEvents;
+    RRDSET *st_IOTLBFlushesSec;
+ RRDSET *st_AddressSpaces;
+ RRDSET *st_VirtualTLBPages;
+ RRDSET *st_VirtualTLBFlushEntiresSec;
+
+ DEFINE_RD(DeviceSpacePages4K);
+ DEFINE_RD(DeviceSpacePages2M);
+ DEFINE_RD(DeviceSpacePages1G);
+ DEFINE_RD(GPASpacePages4K);
+ DEFINE_RD(GPASpacePages2M);
+ DEFINE_RD(GPASpacePages1G);
+ DEFINE_RD(GPASpaceModifications);
+
+ DEFINE_RD(AttachedDevices);
+ DEFINE_RD(DepositedPages);
+
+ DEFINE_RD(DeviceDMAErrors);
+ DEFINE_RD(DeviceInterruptErrors);
+ DEFINE_RD(DeviceInterruptThrottleEvents);
+    DEFINE_RD(IOTLBFlushesSec);
+ DEFINE_RD(AddressSpaces);
+ DEFINE_RD(VirtualTLBPages);
+ DEFINE_RD(VirtualTLBFlushEntiresSec);
+
+ COUNTER_DATA DeviceSpacePages4K;
+ COUNTER_DATA DeviceSpacePages2M;
+ COUNTER_DATA DeviceSpacePages1G;
+ COUNTER_DATA GPASpacePages4K;
+ COUNTER_DATA GPASpacePages2M;
+ COUNTER_DATA GPASpacePages1G;
+ COUNTER_DATA GPASpaceModifications;
+ COUNTER_DATA AttachedDevices;
+ COUNTER_DATA DepositedPages;
+ COUNTER_DATA DeviceDMAErrors;
+ COUNTER_DATA DeviceInterruptErrors;
+ COUNTER_DATA DeviceInterruptThrottleEvents;
+    COUNTER_DATA IOTLBFlushesSec;
+ COUNTER_DATA AddressSpaces;
+ COUNTER_DATA VirtualTLBPages;
+ COUNTER_DATA VirtualTLBFlushEntiresSec;
+};
+
+// Initialize the keys for the root partition metrics
+void initialize_hyperv_root_partition_keys(struct hypervisor_root_partition *p) {
+ p->DeviceSpacePages4K.key = "4K device pages";
+ p->DeviceSpacePages2M.key = "2M device pages";
+ p->DeviceSpacePages1G.key = "1G device pages";
+
+ p->GPASpacePages4K.key = "4K GPA pages";
+ p->GPASpacePages2M.key = "2M GPA pages";
+ p->GPASpacePages1G.key = "1G GPA pages";
+
+ p->GPASpaceModifications.key = "GPA Space Modifications/sec";
+ p->AttachedDevices.key = "Attached Devices";
+ p->DepositedPages.key = "Deposited Pages";
+
+ p->DeviceDMAErrors.key = "Device DMA Errors";
+ p->DeviceInterruptErrors.key = "Device Interrupt Errors";
+ p->DeviceInterruptThrottleEvents.key = "Device Interrupt Throttle Events";
+    p->IOTLBFlushesSec.key = "I/O TLB Flushes/sec";
+ p->AddressSpaces.key = "Address Spaces";
+ p->VirtualTLBPages.key = "Virtual TLB Pages";
+ p->VirtualTLBFlushEntiresSec.key = "Virtual TLB Flush Entires/sec";
+}
+
+// Callback function for inserting root partition metrics into the dictionary
+void dict_hyperv_root_partition_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct hypervisor_root_partition *p = value;
+ initialize_hyperv_root_partition_keys(p);
+}
+
+// Function to handle "Hyper-V Hypervisor Root Partition" metrics (Device Space and GPA Space)
+static bool do_hyperv_root_partition(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data)
+{
+ hyperv_perf_item *item = data;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name);
+ if (!pObjectType)
+ return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer));
+
+ if(strcasecmp(windows_shared_buffer, "_Total") == 0)
+ continue;
+
+ struct hypervisor_root_partition *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (!p->collected_metadata) {
+ p->collected_metadata = true;
+ }
+
+ // Fetch counters
+ GET_INSTANCE_COUNTER(DeviceSpacePages4K);
+ GET_INSTANCE_COUNTER(DeviceSpacePages2M);
+ GET_INSTANCE_COUNTER(DeviceSpacePages1G);
+ GET_INSTANCE_COUNTER(GPASpacePages4K);
+ GET_INSTANCE_COUNTER(GPASpacePages2M);
+ GET_INSTANCE_COUNTER(GPASpacePages1G);
+ GET_INSTANCE_COUNTER(GPASpaceModifications);
+ GET_INSTANCE_COUNTER(AttachedDevices);
+ GET_INSTANCE_COUNTER(DepositedPages);
+
+ GET_INSTANCE_COUNTER(DeviceDMAErrors);
+ GET_INSTANCE_COUNTER(DeviceInterruptErrors);
+ GET_INSTANCE_COUNTER(DeviceInterruptThrottleEvents);
+        GET_INSTANCE_COUNTER(IOTLBFlushesSec);
+ GET_INSTANCE_COUNTER(AddressSpaces);
+ GET_INSTANCE_COUNTER(VirtualTLBPages);
+ GET_INSTANCE_COUNTER(VirtualTLBFlushEntiresSec);
+
+
+ // Create charts
+ if (!p->charts_created) {
+ p->charts_created = true;
+ p->st_device_space_pages = rrdset_create_localhost(
+ "root_partition_device_space_pages",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_device_space_pages",
+ "Root partition device space pages",
+ "pages",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_SPACE_PAGES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_DeviceSpacePages4K = rrddim_add(p->st_device_space_pages, "4K", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_DeviceSpacePages2M = rrddim_add(p->st_device_space_pages, "2M", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_DeviceSpacePages1G = rrddim_add(p->st_device_space_pages, "1G", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ p->st_gpa_space_pages = rrdset_create_localhost(
+ "root_partition_gpa_space_pages",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_gpa_space_pages",
+ "Root partition GPA space pages",
+ "pages",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_GPA_SPACE_PAGES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_GPASpacePages4K = rrddim_add(p->st_gpa_space_pages, "4K", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_GPASpacePages2M = rrddim_add(p->st_gpa_space_pages, "2M", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_GPASpacePages1G = rrddim_add(p->st_gpa_space_pages, "1G", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ p->st_gpa_space_modifications = rrdset_create_localhost(
+ "root_partition_gpa_space_modifications",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_gpa_space_modifications",
+ "Root partition GPA space modifications",
+ "modifications/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_GPA_SPACE_MODIFICATIONS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_GPASpaceModifications =
+ rrddim_add(p->st_gpa_space_modifications, "gpa", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ p->st_attached_devices = rrdset_create_localhost(
+ "root_partition_attached_devices",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_attached_devices",
+ "Root partition attached devices",
+ "devices",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_ATTACHED_DEVICES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_AttachedDevices = rrddim_add(p->st_attached_devices, "attached", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ p->st_deposited_pages = rrdset_create_localhost(
+ "root_partition_deposited_pages",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_deposited_pages",
+ "Root partition deposited pages",
+ "pages",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEPOSITED_PAGES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_DepositedPages = rrddim_add(p->st_deposited_pages, "gpa", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ p->st_DeviceDMAErrors = rrdset_create_localhost(
+ "root_partition_device_dma_errors",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_device_dma_errors",
+ "Root partition illegal DMA requests",
+ "requests",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_DMA_ERRORS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_DeviceDMAErrors =
+ rrddim_add(p->st_DeviceDMAErrors, "illegal_dma", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ p->st_DeviceInterruptErrors = rrdset_create_localhost(
+ "root_partition_device_interrupt_errors",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_device_interrupt_errors",
+ "Root partition illegal interrupt requestss",
+ "requests",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_INTERRUPT_ERRORS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_DeviceInterruptErrors =
+ rrddim_add(p->st_DeviceInterruptErrors, "illegal_interrupt", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ p->st_DeviceInterruptThrottleEvents = rrdset_create_localhost(
+ "root_partition_device_interrupt_throttle_events",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_device_interrupt_throttle_events",
+ "Root partition throttled interrupts",
+ "events",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_DEVICE_INTERRUPT_THROTTLE_EVENTS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_DeviceInterruptThrottleEvents =
+ rrddim_add(p->st_DeviceInterruptThrottleEvents, "throttling", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+            p->st_IOTLBFlushesSec = rrdset_create_localhost(
+ "root_partition_io_tlb_flush",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_io_tlb_flush",
+ "Root partition flushes of I/O TLBs",
+ "flushes/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_IO_TLB_FLUSH,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+            p->rd_IOTLBFlushesSec = rrddim_add(p->st_IOTLBFlushesSec, "gpa", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ p->st_AddressSpaces = rrdset_create_localhost(
+ "root_partition_address_space",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_address_space",
+ "Root partition address spaces in the virtual TLB",
+ "address spaces",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_ADDRESS_SPACE,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_AddressSpaces = rrddim_add(p->st_AddressSpaces, "address_spaces", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ p->st_VirtualTLBPages = rrdset_create_localhost(
+ "root_partition_virtual_tlb_pages",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_virtual_tlb_pages",
+ "Root partition pages used by the virtual TLB",
+ "pages",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_VIRTUAL_TLB_PAGES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_VirtualTLBPages = rrddim_add(p->st_VirtualTLBPages, "used", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ p->st_VirtualTLBFlushEntiresSec = rrdset_create_localhost(
+ "root_partition_virtual_tlb_flush_entries",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".root_partition_virtual_tlb_flush_entries",
+ "Root partition flushes of the entire virtual TLB",
+ "flushes/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_ROOT_PARTITION_VIRTUAL_TLB_FLUSH_ENTRIES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_VirtualTLBFlushEntiresSec =
+ rrddim_add(p->st_VirtualTLBFlushEntiresSec, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ // Set the data for each dimension
+
+ SETP_DIM_VALUE(st_device_space_pages,DeviceSpacePages4K);
+ SETP_DIM_VALUE(st_device_space_pages,DeviceSpacePages2M);
+ SETP_DIM_VALUE(st_device_space_pages,DeviceSpacePages1G);
+
+ SETP_DIM_VALUE(st_gpa_space_pages, GPASpacePages4K);
+ SETP_DIM_VALUE(st_gpa_space_pages, GPASpacePages2M);
+ SETP_DIM_VALUE(st_gpa_space_pages, GPASpacePages1G);
+
+ SETP_DIM_VALUE(st_gpa_space_modifications, GPASpaceModifications);
+
+ SETP_DIM_VALUE(st_attached_devices, AttachedDevices);
+ SETP_DIM_VALUE(st_deposited_pages, DepositedPages);
+
+ SETP_DIM_VALUE(st_DeviceDMAErrors, DeviceDMAErrors);
+ SETP_DIM_VALUE(st_DeviceInterruptErrors, DeviceInterruptErrors);
+ SETP_DIM_VALUE(st_DeviceInterruptThrottleEvents, DeviceInterruptThrottleEvents);
+        SETP_DIM_VALUE(st_IOTLBFlushesSec, IOTLBFlushesSec);
+ SETP_DIM_VALUE(st_AddressSpaces, AddressSpaces);
+ SETP_DIM_VALUE(st_VirtualTLBPages, VirtualTLBPages);
+ SETP_DIM_VALUE(st_VirtualTLBFlushEntiresSec, VirtualTLBFlushEntiresSec);
+
+ // Mark the charts as done
+ rrdset_done(p->st_device_space_pages);
+ rrdset_done(p->st_gpa_space_pages);
+ rrdset_done(p->st_gpa_space_modifications);
+ rrdset_done(p->st_attached_devices);
+ rrdset_done(p->st_deposited_pages);
+ rrdset_done(p->st_DeviceInterruptErrors);
+ rrdset_done(p->st_DeviceInterruptThrottleEvents);
+        rrdset_done(p->st_IOTLBFlushesSec);
+ rrdset_done(p->st_AddressSpaces);
+ rrdset_done(p->st_DeviceDMAErrors);
+ rrdset_done(p->st_VirtualTLBPages);
+ rrdset_done(p->st_VirtualTLBFlushEntiresSec);
+ }
+
+ return true;
+}
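+
+// Algorithm choice above: counters Windows exposes as monotonically increasing
+// totals (the I/O TLB and virtual TLB flush counters) are added with
+// RRD_ALGORITHM_INCREMENTAL so Netdata derives per-second rates from them,
+// while point-in-time values such as page counts, address spaces and attached
+// devices use RRD_ALGORITHM_ABSOLUTE and are charted as raw gauges.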
+
+// Storage DEVICE
+
+struct hypervisor_storage_device {
+ bool collected_metadata;
+ bool charts_created;
+
+ RRDSET *st_operations;
+ DEFINE_RD(ReadOperationsSec);
+ DEFINE_RD(WriteOperationsSec);
+
+ RRDSET *st_bytes;
+ DEFINE_RD(ReadBytesSec);
+ DEFINE_RD(WriteBytesSec);
+
+ RRDSET *st_errors;
+ DEFINE_RD(ErrorCount);
+
+ COUNTER_DATA ReadOperationsSec;
+ COUNTER_DATA WriteOperationsSec;
+
+ COUNTER_DATA ReadBytesSec;
+ COUNTER_DATA WriteBytesSec;
+ COUNTER_DATA ErrorCount;
+};
+
+
+// Initialize the keys for the root partition metrics
+void initialize_hyperv_storage_device_keys(struct hypervisor_storage_device *p) {
+ p->ReadOperationsSec.key = "Read Operations/Sec";
+ p->WriteOperationsSec.key = "Write Operations/Sec";
+
+ p->ReadBytesSec.key = "Read Bytes/sec";
+ p->WriteBytesSec.key = "Write Bytes/sec";
+ p->ErrorCount.key = "Error Count";
+}
+
+// Callback function for inserting root partition metrics into the dictionary
+void dict_hyperv_storage_device_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct hypervisor_storage_device *p = value;
+ initialize_hyperv_storage_device_keys(p);
+}
+
+static bool do_hyperv_storage_device(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data)
+{
+ hyperv_perf_item *item = data;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name);
+ if (!pObjectType)
+ return false;
+
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer));
+
+ if(strcasecmp(windows_shared_buffer, "_Total") == 0)
+ continue;
+
+ struct hypervisor_storage_device *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (!p->collected_metadata) {
+ p->collected_metadata = true;
+ }
+
+ // Fetch counters
+ GET_INSTANCE_COUNTER(ReadOperationsSec);
+ GET_INSTANCE_COUNTER(WriteOperationsSec);
+
+ GET_INSTANCE_COUNTER(ReadBytesSec);
+ GET_INSTANCE_COUNTER(WriteBytesSec);
+ GET_INSTANCE_COUNTER(ErrorCount);
+
+ if (!p->charts_created) {
+ p->charts_created = true;
+ if (!p->st_operations) {
+ p->st_operations = rrdset_create_localhost(
+ "vm_storage_device_operations",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV".vm_storage_device_operations",
+ "VM storage device IOPS",
+ "operations/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_OPERATIONS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_ReadOperationsSec = rrddim_add(p->st_operations, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_WriteOperationsSec = rrddim_add(p->st_operations, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_operations->rrdlabels, "vm_storage_device", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!p->st_bytes) {
+ p->st_bytes = rrdset_create_localhost(
+ "vm_storage_device_bytes",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV".vm_storage_device_bytes",
+ "VM storage device IO",
+ "bytes/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_BYTES,
+ update_every,
+ RRDSET_TYPE_AREA);
+
+ p->rd_ReadBytesSec = rrddim_add(p->st_bytes, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_WriteBytesSec = rrddim_add(p->st_bytes, "write", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_bytes->rrdlabels, "vm_storage_device", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!p->st_errors) {
+ p->st_errors = rrdset_create_localhost(
+ "vm_storage_device_errors",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV".vm_storage_device_errors",
+ "VM storage device errors",
+ "errors/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_STORAGE_DEVICE_ERRORS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_ErrorCount = rrddim_add(p->st_errors, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_errors->rrdlabels, "vm_storage_device", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+ }
+
+ SETP_DIM_VALUE(st_operations,ReadOperationsSec);
+ SETP_DIM_VALUE(st_operations,WriteOperationsSec);
+
+ SETP_DIM_VALUE(st_bytes,ReadBytesSec);
+ SETP_DIM_VALUE(st_bytes,WriteBytesSec);
+
+ SETP_DIM_VALUE(st_errors,ErrorCount);
+
+ // Mark the charts as done
+ rrdset_done(p->st_operations);
+ rrdset_done(p->st_bytes);
+ rrdset_done(p->st_errors);
+ }
+
+ return true;
+}
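+
+// Sign convention: the write dimensions are added with a negative multiplier so
+// reads and writes are drawn on opposite sides of the zero line - the same
+// convention the virtual switch and network interface charts below use for the
+// received vs. sent directions.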
+
+struct hypervisor_switch {
+ bool collected_metadata;
+ bool charts_created;
+
+ RRDSET *st_bytes;
+ DEFINE_RD(BytesSentSec);
+ DEFINE_RD(BytesReceivedSec);
+
+ RRDSET *st_packets;
+ DEFINE_RD(PacketsSentSec);
+ DEFINE_RD(PacketsReceivedSec);
+
+ RRDSET *st_directed_packets;
+ DEFINE_RD(DirectedPacketsSentSec);
+ DEFINE_RD(DirectedPacketsReceivedSec);
+
+ RRDSET *st_broadcast_packets;
+ DEFINE_RD(BroadcastPacketsSentSec);
+ DEFINE_RD(BroadcastPacketsReceivedSec);
+
+ RRDSET *st_multicast_packets;
+ DEFINE_RD(MulticastPacketsSentSec);
+ DEFINE_RD(MulticastPacketsReceivedSec);
+
+ RRDSET *st_dropped_packets;
+ DEFINE_RD(DroppedPacketsOutgoingSec);
+ DEFINE_RD(DroppedPacketsIncomingSec);
+
+ RRDSET *st_ext_dropped_packets;
+ DEFINE_RD(ExtensionsDroppedPacketsOutgoingSec);
+ DEFINE_RD(ExtensionsDroppedPacketsIncomingSec);
+
+ RRDSET *st_flooded;
+ DEFINE_RD(PacketsFlooded);
+
+ RRDSET *st_learned_mac;
+ DEFINE_RD(LearnedMacAddresses);
+
+ RRDSET *st_purged_mac;
+ DEFINE_RD(PurgedMacAddresses);
+
+ COUNTER_DATA BytesSentSec;
+ COUNTER_DATA BytesReceivedSec;
+
+ COUNTER_DATA PacketsSentSec;
+ COUNTER_DATA PacketsReceivedSec;
+
+ COUNTER_DATA DirectedPacketsSentSec;
+ COUNTER_DATA DirectedPacketsReceivedSec;
+
+ COUNTER_DATA BroadcastPacketsSentSec;
+ COUNTER_DATA BroadcastPacketsReceivedSec;
+
+ COUNTER_DATA MulticastPacketsSentSec;
+ COUNTER_DATA MulticastPacketsReceivedSec;
+
+ COUNTER_DATA DroppedPacketsOutgoingSec;
+ COUNTER_DATA DroppedPacketsIncomingSec;
+
+ COUNTER_DATA ExtensionsDroppedPacketsOutgoingSec;
+ COUNTER_DATA ExtensionsDroppedPacketsIncomingSec;
+
+ COUNTER_DATA PacketsFlooded;
+
+ COUNTER_DATA LearnedMacAddresses;
+
+ COUNTER_DATA PurgedMacAddresses;
+};
+
+// Initialize the keys for the root partition metrics
+void initialize_hyperv_switch_keys(struct hypervisor_switch *p)
+{
+ p->BytesSentSec.key = "Bytes Sent/sec";
+ p->BytesReceivedSec.key = "Bytes Received/sec";
+ p->PacketsSentSec.key = "Packets Sent/sec";
+ p->PacketsReceivedSec.key = "Packets Received/sec";
+
+ p->DirectedPacketsSentSec.key = "Directed Packets Sent/sec";
+ p->DirectedPacketsReceivedSec.key = "Directed Packets Received/sec";
+ p->BroadcastPacketsSentSec.key = "Broadcast Packets Sent/sec";
+ p->BroadcastPacketsReceivedSec.key = "Broadcast Packets Received/sec";
+ p->MulticastPacketsSentSec.key = "Multicast Packets Sent/sec";
+ p->MulticastPacketsReceivedSec.key = "Multicast Packets Received/sec";
+ p->DroppedPacketsOutgoingSec.key = "Dropped Packets Outgoing/sec";
+ p->DroppedPacketsIncomingSec.key = "Dropped Packets Incoming/sec";
+ p->ExtensionsDroppedPacketsOutgoingSec.key = "Extensions Dropped Packets Outgoing/sec";
+ p->ExtensionsDroppedPacketsIncomingSec.key = "Extensions Dropped Packets Incoming/sec";
+ p->PacketsFlooded.key = "Packets Flooded";
+ p->LearnedMacAddresses.key = "Learned Mac Addresses";
+ p->PurgedMacAddresses.key = "Purged Mac Addresses";
+}
+
+void dict_hyperv_switch_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused)
+{
+ struct hypervisor_switch *p = value;
+ initialize_hyperv_switch_keys(p);
+}
+
+static bool do_hyperv_switch(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data)
+{
+ hyperv_perf_item *item = data;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name);
+ if (!pObjectType)
+ return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ get_and_sanitize_instance_value(
+ pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer));
+
+ if(strcasecmp(windows_shared_buffer, "_Total") == 0)
+ continue;
+
+ struct hypervisor_switch *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (!p->collected_metadata) {
+ p->collected_metadata = true;
+ }
+
+ GET_INSTANCE_COUNTER(BytesReceivedSec);
+ GET_INSTANCE_COUNTER(BytesSentSec);
+
+ GET_INSTANCE_COUNTER(PacketsReceivedSec);
+ GET_INSTANCE_COUNTER(PacketsSentSec);
+
+ GET_INSTANCE_COUNTER(DirectedPacketsSentSec);
+ GET_INSTANCE_COUNTER(DirectedPacketsReceivedSec);
+
+ GET_INSTANCE_COUNTER(BroadcastPacketsSentSec);
+ GET_INSTANCE_COUNTER(BroadcastPacketsReceivedSec);
+
+ GET_INSTANCE_COUNTER(MulticastPacketsSentSec);
+ GET_INSTANCE_COUNTER(MulticastPacketsReceivedSec);
+
+ GET_INSTANCE_COUNTER(DroppedPacketsOutgoingSec);
+ GET_INSTANCE_COUNTER(DroppedPacketsIncomingSec);
+
+ GET_INSTANCE_COUNTER(ExtensionsDroppedPacketsOutgoingSec);
+ GET_INSTANCE_COUNTER(ExtensionsDroppedPacketsIncomingSec);
+
+ GET_INSTANCE_COUNTER(PacketsFlooded);
+
+ GET_INSTANCE_COUNTER(LearnedMacAddresses);
+
+ GET_INSTANCE_COUNTER(PurgedMacAddresses);
+
+ if (!p->charts_created) {
+ p->charts_created = true;
+
+ p->st_bytes = rrdset_create_localhost(
+ "vswitch_traffic",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_traffic",
+ "Virtual switch traffic",
+ "kilobits/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_TRAFFIC,
+ update_every,
+ RRDSET_TYPE_AREA);
+
+ p->rd_BytesReceivedSec = rrddim_add(p->st_bytes, "received", NULL, 8, 1000, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_BytesSentSec = rrddim_add(p->st_bytes, "sent", NULL, -8, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_bytes->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_packets = rrdset_create_localhost(
+ "vswitch_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_packets",
+ "Virtual switch packets",
+ "packets/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_PacketsReceivedSec = rrddim_add(p->st_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_PacketsSentSec = rrddim_add(p->st_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_directed_packets = rrdset_create_localhost(
+ "vswitch_directed_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_directed_packets",
+ "Virtual switch directed packets",
+ "packets/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_DIRECTED_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_DirectedPacketsReceivedSec =
+ rrddim_add(p->st_directed_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_DirectedPacketsSentSec =
+ rrddim_add(p->st_directed_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_directed_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_broadcast_packets = rrdset_create_localhost(
+ "vswitch_broadcast_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_broadcast_packets",
+ "Virtual switch broadcast packets",
+ "packets/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_BROADCAST_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_BroadcastPacketsReceivedSec =
+ rrddim_add(p->st_broadcast_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_BroadcastPacketsSentSec =
+ rrddim_add(p->st_broadcast_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_broadcast_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_multicast_packets = rrdset_create_localhost(
+ "vswitch_multicast_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_multicast_packets",
+ "Virtual switch multicast packets",
+ "packets/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_MULTICAST_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_MulticastPacketsReceivedSec =
+ rrddim_add(p->st_multicast_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_MulticastPacketsSentSec =
+ rrddim_add(p->st_multicast_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_multicast_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_dropped_packets = rrdset_create_localhost(
+ "vswitch_dropped_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_dropped_packets",
+ "Virtual switch dropped packets",
+ "drops/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_DROPPED_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_DroppedPacketsIncomingSec =
+ rrddim_add(p->st_dropped_packets, "incoming", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_DroppedPacketsOutgoingSec =
+ rrddim_add(p->st_dropped_packets, "outgoing", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_dropped_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_ext_dropped_packets = rrdset_create_localhost(
+ "vswitch_extensions_dropped_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_extensions_dropped_packets",
+ "Virtual switch extensions dropped packets",
+ "drops/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_EXTENSIONS_DROPPED_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_ExtensionsDroppedPacketsIncomingSec =
+ rrddim_add(p->st_ext_dropped_packets, "incoming", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_ExtensionsDroppedPacketsOutgoingSec =
+ rrddim_add(p->st_ext_dropped_packets, "outgoing", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_ext_dropped_packets->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_flooded = rrdset_create_localhost(
+ "vswitch_packets_flooded",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_packets_flooded",
+ "Virtual switch flooded packets",
+ "packets/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PACKETS_FLOODED,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_PacketsFlooded = rrddim_add(p->st_flooded, "flooded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_flooded->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_learned_mac = rrdset_create_localhost(
+ "vswitch_learned_mac_addresses",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_learned_mac_addresses",
+ "Virtual switch learned MAC addresses",
+ "mac addresses/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_LEARNED_MAC_ADDRESSES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_LearnedMacAddresses = rrddim_add(p->st_learned_mac, "learned", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_learned_mac->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_purged_mac = rrdset_create_localhost(
+ "vswitch_purged_mac_addresses",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vswitch_purged_mac_addresses",
+ "Virtual switch purged MAC addresses",
+ "mac addresses/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VSWITCH_PURGED_MAC_ADDRESSES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_PurgedMacAddresses = rrddim_add(p->st_purged_mac, "purged", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_purged_mac->rrdlabels, "vswitch", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ SETP_DIM_VALUE(st_packets, PacketsReceivedSec);
+ SETP_DIM_VALUE(st_packets, PacketsSentSec);
+
+ SETP_DIM_VALUE(st_bytes, BytesReceivedSec);
+ SETP_DIM_VALUE(st_bytes, BytesSentSec);
+
+ SETP_DIM_VALUE(st_directed_packets, DirectedPacketsSentSec);
+ SETP_DIM_VALUE(st_directed_packets, DirectedPacketsReceivedSec);
+
+ SETP_DIM_VALUE(st_broadcast_packets, BroadcastPacketsSentSec);
+ SETP_DIM_VALUE(st_broadcast_packets, BroadcastPacketsReceivedSec);
+
+ SETP_DIM_VALUE(st_multicast_packets, MulticastPacketsSentSec);
+ SETP_DIM_VALUE(st_multicast_packets, MulticastPacketsReceivedSec);
+
+ SETP_DIM_VALUE(st_dropped_packets, DroppedPacketsOutgoingSec);
+ SETP_DIM_VALUE(st_dropped_packets, DroppedPacketsIncomingSec);
+
+ SETP_DIM_VALUE(st_ext_dropped_packets, ExtensionsDroppedPacketsOutgoingSec);
+ SETP_DIM_VALUE(st_ext_dropped_packets, ExtensionsDroppedPacketsIncomingSec);
+
+ SETP_DIM_VALUE(st_flooded, PacketsFlooded);
+ SETP_DIM_VALUE(st_learned_mac, LearnedMacAddresses);
+ SETP_DIM_VALUE(st_purged_mac, PurgedMacAddresses);
+
+ // Mark the charts as done
+ rrdset_done(p->st_packets);
+ rrdset_done(p->st_bytes);
+
+ rrdset_done(p->st_directed_packets);
+ rrdset_done(p->st_broadcast_packets);
+ rrdset_done(p->st_multicast_packets);
+ rrdset_done(p->st_dropped_packets);
+ rrdset_done(p->st_ext_dropped_packets);
+ rrdset_done(p->st_flooded);
+ rrdset_done(p->st_learned_mac);
+ rrdset_done(p->st_purged_mac);
+
+ }
+ return true;
+}
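+
+// Unit conversion: the traffic charts are declared in kilobits/s while the
+// underlying counters are bytes, so the byte dimensions use multiplier 8 and
+// divisor 1000 (bytes * 8 / 1000 = kilobits), negated for the "sent" direction.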
+
+struct hypervisor_network_adapter {
+ bool collected_metadata;
+ bool charts_created;
+
+ RRDSET *st_dropped_packets;
+ DEFINE_RD(DroppedPacketsOutgoingSec);
+ DEFINE_RD(DroppedPacketsIncomingSec);
+
+ RRDSET *st_send_receive_packets;
+ DEFINE_RD(PacketsSentSec);
+ DEFINE_RD(PacketsReceivedSec);
+
+ RRDSET *st_send_receive_bytes;
+ DEFINE_RD(BytesSentSec);
+ DEFINE_RD(BytesReceivedSec);
+
+ RRDSET *st_IPsecoffloadBytes;
+ DEFINE_RD(IPsecoffloadBytesReceivedSec);
+ DEFINE_RD(IPsecoffloadBytesSentSec);
+
+ RRDSET *st_DirectedPackets;
+ DEFINE_RD(DirectedPacketsSentSec);
+ DEFINE_RD(DirectedPacketsReceivedSec);
+
+ RRDSET *st_BroadcastPackets;
+ DEFINE_RD(BroadcastPacketsSentSec);
+ DEFINE_RD(BroadcastPacketsReceivedSec);
+
+ RRDSET *st_MulticastPackets;
+ DEFINE_RD(MulticastPacketsSentSec);
+ DEFINE_RD(MulticastPacketsReceivedSec);
+
+ COUNTER_DATA DroppedPacketsOutgoingSec;
+ COUNTER_DATA DroppedPacketsIncomingSec;
+
+ COUNTER_DATA PacketsSentSec;
+ COUNTER_DATA PacketsReceivedSec;
+
+ COUNTER_DATA BytesSentSec;
+ COUNTER_DATA BytesReceivedSec;
+
+ COUNTER_DATA IPsecoffloadBytesReceivedSec;
+ COUNTER_DATA IPsecoffloadBytesSentSec;
+
+ COUNTER_DATA DirectedPacketsSentSec;
+ COUNTER_DATA DirectedPacketsReceivedSec;
+
+ COUNTER_DATA BroadcastPacketsSentSec;
+ COUNTER_DATA BroadcastPacketsReceivedSec;
+
+ COUNTER_DATA MulticastPacketsSentSec;
+ COUNTER_DATA MulticastPacketsReceivedSec;
+};
+
+// Initialize the keys for the root partition metrics
+void initialize_hyperv_network_adapter_keys(struct hypervisor_network_adapter *p)
+{
+ p->DroppedPacketsOutgoingSec.key = "Dropped Packets Outgoing/sec";
+ p->DroppedPacketsIncomingSec.key = "Dropped Packets Incoming/sec";
+
+ p->PacketsSentSec.key = "Packets Sent/sec";
+ p->PacketsReceivedSec.key = "Packets Received/sec";
+
+ p->BytesSentSec.key = "Bytes Sent/sec";
+ p->BytesReceivedSec.key = "Bytes Received/sec";
+
+ p->IPsecoffloadBytesReceivedSec.key = "IPsec offload Bytes Receive/sec";
+ p->IPsecoffloadBytesSentSec.key = "IPsec offload Bytes Sent/sec";
+ p->DirectedPacketsSentSec.key = "Directed Packets Sent/sec";
+ p->DirectedPacketsReceivedSec.key = "Directed Packets Received/sec";
+ p->BroadcastPacketsSentSec.key = "Broadcast Packets Sent/sec";
+ p->BroadcastPacketsReceivedSec.key = "Broadcast Packets Received/sec";
+ p->MulticastPacketsSentSec.key = "Multicast Packets Sent/sec";
+ p->MulticastPacketsReceivedSec.key = "Multicast Packets Received/sec";
+}
+
+void dict_hyperv_network_adapter_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused)
+{
+ struct hypervisor_network_adapter *p = value;
+ initialize_hyperv_network_adapter_keys(p);
+}
+
+static bool do_hyperv_network_adapter(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data)
+{
+ hyperv_perf_item *item = data;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name);
+ if (!pObjectType)
+ return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer));
+
+ if(strcasecmp(windows_shared_buffer, "_Total") == 0)
+ continue;
+
+ struct hypervisor_network_adapter *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (!p->collected_metadata) {
+ p->collected_metadata = true;
+ }
+
+ GET_INSTANCE_COUNTER(DroppedPacketsIncomingSec);
+ GET_INSTANCE_COUNTER(DroppedPacketsOutgoingSec);
+
+ GET_INSTANCE_COUNTER(PacketsReceivedSec);
+ GET_INSTANCE_COUNTER(PacketsSentSec);
+
+ GET_INSTANCE_COUNTER(BytesReceivedSec);
+ GET_INSTANCE_COUNTER(BytesSentSec);
+
+ GET_INSTANCE_COUNTER(IPsecoffloadBytesReceivedSec);
+ GET_INSTANCE_COUNTER(IPsecoffloadBytesSentSec);
+
+ GET_INSTANCE_COUNTER(DirectedPacketsSentSec);
+ GET_INSTANCE_COUNTER(DirectedPacketsReceivedSec);
+
+ GET_INSTANCE_COUNTER(BroadcastPacketsSentSec);
+ GET_INSTANCE_COUNTER(BroadcastPacketsReceivedSec);
+
+ GET_INSTANCE_COUNTER(MulticastPacketsSentSec);
+ GET_INSTANCE_COUNTER(MulticastPacketsReceivedSec);
+
+ if (!p->charts_created) {
+ p->charts_created = true;
+ p->st_dropped_packets = rrdset_create_localhost(
+ "vm_net_interface_packets_dropped",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV".vm_net_interface_packets_dropped",
+ "VM interface packets dropped",
+ "drops/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS_DROPPED,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_DroppedPacketsIncomingSec = rrddim_add(p->st_dropped_packets, "incoming", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_DroppedPacketsOutgoingSec = rrddim_add(p->st_dropped_packets, "outgoing", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_dropped_packets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_send_receive_packets = rrdset_create_localhost(
+ "vm_net_interface_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_net_interface_packets",
+ "VM interface packets",
+ "packets/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_PacketsReceivedSec = rrddim_add(p->st_send_receive_packets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_PacketsSentSec = rrddim_add(p->st_send_receive_packets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_send_receive_packets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_send_receive_bytes = rrdset_create_localhost(
+ "vm_net_interface_traffic",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_net_interface_traffic",
+ "VM interface traffic",
+ "kilobits/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_TRAFFIC,
+ update_every,
+ RRDSET_TYPE_AREA);
+
+ p->rd_BytesReceivedSec = rrddim_add(p->st_send_receive_bytes, "received", NULL, 8, 1000, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_BytesSentSec = rrddim_add(p->st_send_receive_bytes, "sent", NULL, -8, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_send_receive_bytes->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_IPsecoffloadBytes = rrdset_create_localhost(
+ "vm_net_interface_ipsec_traffic",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_net_interface_ipsec_traffic",
+ "VM interface traffic",
+ "kilobits/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_IPSEC_TRAFFIC,
+ update_every,
+ RRDSET_TYPE_AREA);
+
+ p->rd_IPsecoffloadBytesReceivedSec =
+ rrddim_add(p->st_IPsecoffloadBytes, "received", NULL, 8, 1000, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_IPsecoffloadBytesSentSec =
+ rrddim_add(p->st_IPsecoffloadBytes, "sent", NULL, -8, 1000, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(
+ p->st_IPsecoffloadBytes->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_DirectedPackets = rrdset_create_localhost(
+ "vm_net_interface_directed_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_net_interface_directed_packets",
+ "VM interface traffic",
+ "packets/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_DIRECTED_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_DirectedPacketsReceivedSec =
+ rrddim_add(p->st_DirectedPackets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_DirectedPacketsSentSec =
+ rrddim_add(p->st_DirectedPackets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(
+ p->st_DirectedPackets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_BroadcastPackets = rrdset_create_localhost(
+ "vm_net_interface_broadcast_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_net_interface_broadcast_packets",
+ "VM interface broadcast",
+ "packets/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_BroadcastPacketsReceivedSec =
+ rrddim_add(p->st_BroadcastPackets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_BroadcastPacketsSentSec =
+ rrddim_add(p->st_BroadcastPackets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(
+ p->st_BroadcastPackets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_MulticastPackets = rrdset_create_localhost(
+ "vm_net_interface_multicast_packets",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_net_interface_multicast_packets",
+ "VM interface multicast",
+ "packets/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_NET_INTERFACE_MULTICAST_PACKETS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_MulticastPacketsReceivedSec =
+ rrddim_add(p->st_MulticastPackets, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_MulticastPacketsSentSec =
+ rrddim_add(p->st_MulticastPackets, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(
+ p->st_MulticastPackets->rrdlabels, "vm_net_interface", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ }
+
+ SETP_DIM_VALUE(st_dropped_packets, DroppedPacketsIncomingSec);
+ SETP_DIM_VALUE(st_dropped_packets, DroppedPacketsOutgoingSec);
+
+ SETP_DIM_VALUE(st_send_receive_packets, PacketsReceivedSec);
+ SETP_DIM_VALUE(st_send_receive_packets, PacketsSentSec);
+
+ SETP_DIM_VALUE(st_send_receive_bytes, BytesReceivedSec);
+ SETP_DIM_VALUE(st_send_receive_bytes, BytesSentSec);
+
+ SETP_DIM_VALUE(st_IPsecoffloadBytes, IPsecoffloadBytesReceivedSec);
+ SETP_DIM_VALUE(st_IPsecoffloadBytes, IPsecoffloadBytesSentSec);
+
+ SETP_DIM_VALUE(st_DirectedPackets, DirectedPacketsSentSec);
+ SETP_DIM_VALUE(st_DirectedPackets, DirectedPacketsReceivedSec);
+
+ SETP_DIM_VALUE(st_BroadcastPackets, BroadcastPacketsSentSec);
+ SETP_DIM_VALUE(st_BroadcastPackets, BroadcastPacketsReceivedSec);
+
+ SETP_DIM_VALUE(st_MulticastPackets, MulticastPacketsSentSec);
+ SETP_DIM_VALUE(st_MulticastPackets, MulticastPacketsReceivedSec);
+
+ rrdset_done(p->st_IPsecoffloadBytes);
+ rrdset_done(p->st_DirectedPackets);
+ rrdset_done(p->st_BroadcastPackets);
+ rrdset_done(p->st_MulticastPackets);
+ rrdset_done(p->st_send_receive_bytes);
+ rrdset_done(p->st_send_receive_packets);
+ rrdset_done(p->st_dropped_packets);
+ }
+ return true;
+}
+
+
+// Hypervisor Virtual Processor
+struct hypervisor_processor {
+ bool collected_metadata;
+ bool charts_created;
+
+ RRDSET *st_HypervisorProcessor;
+
+ DEFINE_RD(GuestRunTime);
+ DEFINE_RD(HypervisorRunTime);
+ DEFINE_RD(RemoteRunTime);
+
+ RRDSET *st_HypervisorProcessorTotal;
+ DEFINE_RD(TotalRunTime);
+
+ COUNTER_DATA GuestRunTime;
+ COUNTER_DATA HypervisorRunTime;
+ COUNTER_DATA RemoteRunTime;
+ COUNTER_DATA TotalRunTime;
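+ // Accumulators for the current interval: run times are summed across every virtual processor of a VM and reset after each chart flush.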
+ collected_number GuestRunTime_total;
+ collected_number HypervisorRunTime_total;
+ collected_number RemoteRunTime_total;
+ collected_number TotalRunTime_total;
+};
+
+
+void initialize_hyperv_processor_keys(struct hypervisor_processor *p)
+{
+ p->GuestRunTime.key = "% Guest Run Time";
+ p->HypervisorRunTime.key = "% Hypervisor Run Time";
+ p->RemoteRunTime.key = "% Remote Run Time";
+ p->TotalRunTime.key = "% Total Run Time";
+ p->GuestRunTime_total = 0;
+ p->HypervisorRunTime_total = 0;
+ p->RemoteRunTime_total = 0;
+ p->TotalRunTime_total = 0;
+}
+
+void dict_hyperv_processor_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused)
+{
+ struct hypervisor_processor *p = value;
+ initialize_hyperv_processor_keys(p);
+}
+
+static bool do_hyperv_processor(PERF_DATA_BLOCK *pDataBlock, int update_every, void *data)
+{
+ hyperv_perf_item *item = data;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, item->registry_name);
+ if (!pObjectType)
+ return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ get_and_sanitize_instance_value(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer));
+
+ if (strcasecmp(windows_shared_buffer, "_Total") == 0)
+ continue;
+
+ char *vm = strchr(windows_shared_buffer, ':');
+ if (vm)
+ *vm = '\0';
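+ // The counter instance name is expected to carry the VM name before the ':'; truncating there aggregates all virtual processors of a VM into one dictionary entry.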
+
+ struct hypervisor_processor *p = dictionary_set(item->instance, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (!p->collected_metadata) {
+ p->collected_metadata = true;
+ }
+
+ GET_INSTANCE_COUNTER(GuestRunTime);
+ GET_INSTANCE_COUNTER(HypervisorRunTime);
+ GET_INSTANCE_COUNTER(RemoteRunTime);
+ GET_INSTANCE_COUNTER(TotalRunTime);
+
+ if (!p->charts_created) {
+ p->charts_created = true;
+ p->st_HypervisorProcessorTotal = rrdset_create_localhost(
+ "vm_cpu_usage",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_cpu_usage",
+ "VM CPU usage",
+ "percentage",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_CPU_USAGE,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ p->rd_TotalRunTime =
+ rrddim_add(p->st_HypervisorProcessorTotal, "usage", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_HypervisorProcessorTotal->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+
+ p->st_HypervisorProcessor = rrdset_create_localhost(
+ "vm_cpu_usage_by_run_context",
+ windows_shared_buffer,
+ NULL,
+ HYPERV,
+ HYPERV ".vm_cpu_usage_by_run_context",
+ "VM CPU usage by run context",
+ "percentage",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_WINDOWS_HYPERV_VM_CPU_USAGE_BY_RUN_CONTEXT,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ p->rd_GuestRunTime =
+ rrddim_add(p->st_HypervisorProcessor, "guest", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_HypervisorRunTime =
+ rrddim_add(p->st_HypervisorProcessor, "hypervisor", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_RemoteRunTime =
+ rrddim_add(p->st_HypervisorProcessor, "remote", NULL, 1, 1000000, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_HypervisorProcessor->rrdlabels, "vm_name", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ p->GuestRunTime_total += (collected_number)p->GuestRunTime.current.Data;
+ p->HypervisorRunTime_total += (collected_number)p->HypervisorRunTime.current.Data;
+ p->RemoteRunTime_total += (collected_number)p->RemoteRunTime.current.Data;
+ p->TotalRunTime_total += (collected_number)p->TotalRunTime.current.Data;
+ }
+
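+ // Flush the per-VM run-time totals accumulated above and reset them for the next collection cycle.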
+ {
+ struct hypervisor_processor *p;
+ dfe_start_read(item->instance, p) {
+ rrddim_set_by_pointer(p->st_HypervisorProcessor, p->rd_HypervisorRunTime, (collected_number) p->HypervisorRunTime_total);
+ rrddim_set_by_pointer(p->st_HypervisorProcessor, p->rd_GuestRunTime, (collected_number) p->GuestRunTime_total);
+ rrddim_set_by_pointer(p->st_HypervisorProcessor, p->rd_RemoteRunTime, (collected_number) p->RemoteRunTime_total);
+ rrdset_done(p->st_HypervisorProcessor);
+
+ rrddim_set_by_pointer(p->st_HypervisorProcessorTotal, p->rd_TotalRunTime, (collected_number) p->TotalRunTime_total);
+ rrdset_done(p->st_HypervisorProcessorTotal);
+
+ p->GuestRunTime_total = 0;
+ p->HypervisorRunTime_total = 0;
+ p->RemoteRunTime_total = 0;
+ p->TotalRunTime_total = 0;
+ }
+ dfe_done(p);
+ }
+
+ return true;
+}
+
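+// Table of Hyper-V perflib objects: each entry maps a counter object name to its collector callback and, optionally, a per-instance dictionary.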
+hyperv_perf_item hyperv_perf_list[] = {
+ {.registry_name = "Hyper-V Dynamic Memory VM",
+ .function_collect = do_hyperv_memory,
+ .dict_insert_cb = dict_hyperv_memory_insert_cb,
+ .dict_size = sizeof(struct hypervisor_memory)},
+
+ {.registry_name = "Hyper-V VM Vid Partition",
+ .function_collect = do_hyperv_vid_partition,
+ .dict_insert_cb = dict_hyperv_partition_insert_cb,
+ .dict_size = sizeof(struct hypervisor_partition)},
+
+ {
+ .registry_name = "Hyper-V Virtual Machine Health Summary",
+ .function_collect = do_hyperv_health_summary,
+ },
+
+ {
+ .registry_name = "Hyper-V Hypervisor Root Partition",
+ .function_collect = do_hyperv_root_partition,
+ .dict_insert_cb = dict_hyperv_root_partition_insert_cb,
+ .dict_size = sizeof(struct hypervisor_root_partition),
+ },
+
+ {.registry_name = "Hyper-V Virtual Storage Device",
+ .function_collect = do_hyperv_storage_device,
+ .dict_insert_cb = dict_hyperv_storage_device_insert_cb,
+ .dict_size = sizeof(struct hypervisor_storage_device)},
+
+ {.registry_name = "Hyper-V Virtual Switch",
+ .function_collect = do_hyperv_switch,
+ .dict_insert_cb = dict_hyperv_switch_insert_cb,
+ .dict_size = sizeof(struct hypervisor_switch)},
+
+ {.registry_name = "Hyper-V Virtual Network Adapter",
+ .function_collect = do_hyperv_network_adapter,
+ .dict_insert_cb = dict_hyperv_network_adapter_insert_cb,
+ .dict_size = sizeof(struct hypervisor_network_adapter)},
+
+ {.registry_name = "Hyper-V Hypervisor Virtual Processor",
+ .function_collect = do_hyperv_processor,
+ .dict_insert_cb = dict_hyperv_processor_insert_cb,
+ .dict_size = sizeof(struct hypervisor_processor)},
+
+ {.registry_name = NULL, .function_collect = NULL}};
+
+int do_PerflibHyperV(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if (unlikely(!initialized)) {
+ for (int i = 0; hyperv_perf_list[i].registry_name != NULL; i++) {
+ hyperv_perf_item *item = &hyperv_perf_list[i];
+ if (item->dict_insert_cb) {
+ item->instance = dictionary_create_advanced(DICT_PERF_OPTION, NULL, item->dict_size);
+ dictionary_register_insert_callback(item->instance, item->dict_insert_cb, NULL);
+ }
+ }
+ initialized = true;
+ }
+
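+ // Iterate the registered objects and collect only those present in this host's perflib data.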
+ for (int i = 0; hyperv_perf_list[i].registry_name != NULL; i++) {
+ // Find the registry ID using the registry name
+ DWORD id = RegistryFindIDByName(hyperv_perf_list[i].registry_name);
+ if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ continue;
+
+ // Get the performance data using the registry ID
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if (!pDataBlock)
+ continue;
+
+ hyperv_perf_list[i].function_collect(pDataBlock, update_every, &hyperv_perf_list[i]);
+ }
+ return 0;
+}
diff --git a/src/collectors/windows.plugin/perflib-memory.c b/src/collectors/windows.plugin/perflib-memory.c
index c876fc68a..e26729cda 100644
--- a/src/collectors/windows.plugin/perflib-memory.c
+++ b/src/collectors/windows.plugin/perflib-memory.c
@@ -1,65 +1,219 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "windows_plugin.h"
-#include "windows-internals.h"
-
-#define _COMMON_PLUGIN_NAME "windows.plugin"
-#define _COMMON_PLUGIN_MODULE_NAME "PerflibMemory"
-#include "../common-contexts/common-contexts.h"
-
-static void initialize(void) {
- ;
-}
-
-static bool do_memory(PERF_DATA_BLOCK *pDataBlock, int update_every) {
- PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Memory");
- if (!pObjectType)
- return false;
-
- static COUNTER_DATA pagesPerSec = { .key = "Pages/sec" };
- static COUNTER_DATA pageFaultsPerSec = { .key = "Page Faults/sec" };
-
- if(perflibGetObjectCounter(pDataBlock, pObjectType, &pageFaultsPerSec) &&
- perflibGetObjectCounter(pDataBlock, pObjectType, &pagesPerSec)) {
- ULONGLONG total = pageFaultsPerSec.current.Data;
- ULONGLONG major = pagesPerSec.current.Data;
- ULONGLONG minor = (total > major) ? total - major : 0;
- common_mem_pgfaults(minor, major, update_every);
- }
-
- static COUNTER_DATA availableBytes = { .key = "Available Bytes" };
- static COUNTER_DATA availableKBytes = { .key = "Available KBytes" };
- static COUNTER_DATA availableMBytes = { .key = "Available MBytes" };
- ULONGLONG available_bytes = 0;
-
- if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableBytes))
- available_bytes = availableBytes.current.Data;
- else if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableKBytes))
- available_bytes = availableKBytes.current.Data * 1024;
- else if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableMBytes))
- available_bytes = availableMBytes.current.Data * 1024 * 1024;
-
- common_mem_available(available_bytes, update_every);
-
- return true;
-}
-
-int do_PerflibMemory(int update_every, usec_t dt __maybe_unused) {
- static bool initialized = false;
-
- if(unlikely(!initialized)) {
- initialize();
- initialized = true;
- }
-
- DWORD id = RegistryFindIDByName("Memory");
- if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
- return -1;
-
- PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
- if(!pDataBlock) return -1;
-
- do_memory(pDataBlock, update_every);
-
- return 0;
-}
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+#define _COMMON_PLUGIN_NAME "windows.plugin"
+#define _COMMON_PLUGIN_MODULE_NAME "PerflibMemory"
+#include "../common-contexts/common-contexts.h"
+
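+// The swap and system pool charts below are all driven by counters of the same "Memory" perflib object (see do_memory()).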
+struct swap {
+ RRDSET *operations;
+ RRDDIM *rd_op_read;
+ RRDDIM *rd_op_write;
+
+ RRDSET *pages;
+ RRDDIM *rd_page_read;
+ RRDDIM *rd_page_write;
+
+ COUNTER_DATA pageReadsTotal;
+ COUNTER_DATA pageWritesTotal;
+ COUNTER_DATA pageInputTotal;
+ COUNTER_DATA pageOutputTotal;
+};
+
+struct system_pool {
+ RRDSET *pool;
+ RRDDIM *rd_paged;
+ RRDDIM *rd_nonpaged;
+
+ COUNTER_DATA pagedData;
+ COUNTER_DATA nonPagedData;
+};
+
+struct swap localSwap = { 0 };
+struct system_pool localPool = { 0 };
+
+void initialize_swap_keys(struct swap *p) {
+ // SWAP Operations
+ p->pageReadsTotal.key = "Page Reads/sec";
+ p->pageWritesTotal.key = "Page Writes/sec";
+
+ // Swap Pages
+ p->pageInputTotal.key = "Pages Input/sec";
+ p->pageOutputTotal.key = "Pages Output/sec";
+}
+
+void initialize_pool_keys(struct system_pool *p) {
+ p->pagedData.key = "Pool Paged Bytes";
+ p->nonPagedData.key = "Pool Nonpaged Bytes";
+}
+
+static void initialize(void) {
+ initialize_swap_keys(&localSwap);
+ initialize_pool_keys(&localPool);
+}
+
+static void do_memory_swap(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every)
+{
+ perflibGetObjectCounter(pDataBlock, pObjectType, &localSwap.pageReadsTotal);
+ perflibGetObjectCounter(pDataBlock, pObjectType, &localSwap.pageWritesTotal);
+ perflibGetObjectCounter(pDataBlock, pObjectType, &localSwap.pageInputTotal);
+ perflibGetObjectCounter(pDataBlock, pObjectType, &localSwap.pageOutputTotal);
+
+ if (!localSwap.operations) {
+ localSwap.operations = rrdset_create_localhost(
+ "mem"
+ , "swap_operations", NULL
+ , "swap"
+ , "mem.swap_iops"
+
+ , "Swap Operations"
+ , "operations/s"
+ , PLUGIN_WINDOWS_NAME
+ , "PerflibMemory"
+ , NETDATA_CHART_PRIO_MEM_SWAPIO
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ localSwap.rd_op_read = rrddim_add(localSwap.operations, "read", NULL,
+ 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ localSwap.rd_op_write = rrddim_add(localSwap.operations, "write", NULL,
+ 1, -1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(localSwap.operations,
+ localSwap.rd_op_read,
+ (collected_number)localSwap.pageReadsTotal.current.Data);
+
+ rrddim_set_by_pointer(localSwap.operations,
+ localSwap.rd_op_write,
+ (collected_number)localSwap.pageWritesTotal.current.Data);
+ rrdset_done(localSwap.operations);
+
+ if (!localSwap.pages) {
+ localSwap.pages = rrdset_create_localhost(
+ "mem"
+ , "swap_pages", NULL
+ , "swap"
+ , "mem.swap_pages_io"
+
+ , "Swap Pages"
+ , "pages/s"
+ , PLUGIN_WINDOWS_NAME
+ , "PerflibMemory"
+ , NETDATA_CHART_PRIO_MEM_SWAP_PAGES
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ localSwap.rd_page_read = rrddim_add(localSwap.pages, "read", NULL,
+ 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ localSwap.rd_page_write = rrddim_add(localSwap.pages, "write", NULL,
+ 1, -1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(localSwap.pages,
+ localSwap.rd_page_read,
+ (collected_number)localSwap.pageInputTotal.current.Data);
+
+ rrddim_set_by_pointer(localSwap.pages,
+ localSwap.rd_page_write,
+ (collected_number)localSwap.pageOutputTotal.current.Data);
+ rrdset_done(localSwap.pages);
+}
+
+static void do_memory_system_pool(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every)
+{
+ perflibGetObjectCounter(pDataBlock, pObjectType, &localPool.nonPagedData);
+ perflibGetObjectCounter(pDataBlock, pObjectType, &localPool.pagedData);
+
+ if (!localPool.pool) {
+ localPool.pool = rrdset_create_localhost(
+ "mem"
+ , "system_pool", NULL
+ , "mem"
+ , "mem.system_pool_size"
+
+ , "System Memory Pool"
+ , "bytes"
+ , PLUGIN_WINDOWS_NAME
+ , "PerflibMemory"
+ , NETDATA_CHART_PRIO_MEM_SYSTEM_POOL
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ localPool.rd_paged = rrddim_add(localPool.pool, "paged", NULL,
+ 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ localPool.rd_nonpaged = rrddim_add(localPool.pool, "nonpaged", NULL,
+ 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(localPool.pool,
+ localPool.rd_paged,
+ (collected_number)localPool.pagedData.current.Data);
+
+ rrddim_set_by_pointer(localPool.pool,
+ localPool.rd_nonpaged,
+ (collected_number)localPool.nonPagedData.current.Data);
+ rrdset_done(localPool.pool);
+}
+
+static bool do_memory(PERF_DATA_BLOCK *pDataBlock, int update_every) {
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Memory");
+ if (!pObjectType)
+ return false;
+
+ static COUNTER_DATA pagesPerSec = { .key = "Pages/sec" };
+ static COUNTER_DATA pageFaultsPerSec = { .key = "Page Faults/sec" };
+
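+ // "Page Faults/sec" counts all faults while "Pages/sec" counts pages read from or written to disk to resolve hard faults, so their difference approximates minor (soft) faults.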
+ if(perflibGetObjectCounter(pDataBlock, pObjectType, &pageFaultsPerSec) &&
+ perflibGetObjectCounter(pDataBlock, pObjectType, &pagesPerSec)) {
+ ULONGLONG total = pageFaultsPerSec.current.Data;
+ ULONGLONG major = pagesPerSec.current.Data;
+ ULONGLONG minor = (total > major) ? total - major : 0;
+ common_mem_pgfaults(minor, major, update_every);
+ }
+
+ static COUNTER_DATA availableBytes = { .key = "Available Bytes" };
+ static COUNTER_DATA availableKBytes = { .key = "Available KBytes" };
+ static COUNTER_DATA availableMBytes = { .key = "Available MBytes" };
+ ULONGLONG available_bytes = 0;
+
+ if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableBytes))
+ available_bytes = availableBytes.current.Data;
+ else if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableKBytes))
+ available_bytes = availableKBytes.current.Data * 1024;
+ else if(perflibGetObjectCounter(pDataBlock, pObjectType, &availableMBytes))
+ available_bytes = availableMBytes.current.Data * 1024 * 1024;
+
+ common_mem_available(available_bytes, update_every);
+
+ do_memory_swap(pDataBlock, pObjectType, update_every);
+
+ do_memory_system_pool(pDataBlock, pObjectType, update_every);
+
+ return true;
+}
+
+int do_PerflibMemory(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if(unlikely(!initialized)) {
+ initialize();
+ initialized = true;
+ }
+
+ DWORD id = RegistryFindIDByName("Memory");
+ if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ return -1;
+
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if(!pDataBlock) return -1;
+
+ do_memory(pDataBlock, update_every);
+
+ return 0;
+}
diff --git a/src/collectors/windows.plugin/perflib-mssql.c b/src/collectors/windows.plugin/perflib-mssql.c
new file mode 100644
index 000000000..f6f5c434d
--- /dev/null
+++ b/src/collectors/windows.plugin/perflib-mssql.c
@@ -0,0 +1,1413 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+// https://learn.microsoft.com/en-us/sql/sql-server/install/instance-configuration?view=sql-server-ver16
+#define NETDATA_MAX_INSTANCE_NAME 32
+#define NETDATA_MAX_INSTANCE_OBJECT 128
+
+BOOL is_sqlexpress = FALSE;
+
+enum netdata_mssql_metrics {
+ NETDATA_MSSQL_GENERAL_STATS,
+ NETDATA_MSSQL_SQL_ERRORS,
+ NETDATA_MSSQL_DATABASE,
+ NETDATA_MSSQL_LOCKS,
+ NETDATA_MSSQL_MEMORY,
+ NETDATA_MSSQL_BUFFER_MANAGEMENT,
+ NETDATA_MSSQL_SQL_STATS,
+ NETDATA_MSSQL_ACCESS_METHODS,
+
+ NETDATA_MSSQL_METRICS_END
+};
+
+struct mssql_instance {
+ char *instanceID;
+
+ char *objectName[NETDATA_MSSQL_METRICS_END];
+
+ RRDSET *st_user_connections;
+ RRDDIM *rd_user_connections;
+
+ RRDSET *st_process_blocked;
+ RRDDIM *rd_process_blocked;
+
+ RRDSET *st_stats_auto_param;
+ RRDDIM *rd_stats_auto_param;
+
+ RRDSET *st_stats_batch_request;
+ RRDDIM *rd_stats_batch_request;
+
+ RRDSET *st_stats_safe_auto;
+ RRDDIM *rd_stats_safe_auto;
+
+ RRDSET *st_stats_compilation;
+ RRDDIM *rd_stats_compilation;
+
+ RRDSET *st_stats_recompiles;
+ RRDDIM *rd_stats_recompiles;
+
+ RRDSET *st_buff_cache_hits;
+ RRDDIM *rd_buff_cache_hits;
+
+ RRDSET *st_buff_cache_page_life_expectancy;
+ RRDDIM *rd_buff_cache_page_life_expectancy;
+
+ RRDSET *st_buff_checkpoint_pages;
+ RRDDIM *rd_buff_checkpoint_pages;
+
+ RRDSET *st_buff_page_iops;
+ RRDDIM *rd_buff_page_reads;
+ RRDDIM *rd_buff_page_writes;
+
+ RRDSET *st_access_method_page_splits;
+ RRDDIM *rd_access_method_page_splits;
+
+ RRDSET *st_sql_errors;
+ RRDDIM *rd_sql_errors;
+
+ RRDSET *st_lockWait;
+ RRDSET *st_deadLocks;
+ DICTIONARY *locks_instances;
+
+ DICTIONARY *databases;
+
+ RRDSET *st_conn_memory;
+ RRDDIM *rd_conn_memory;
+
+ RRDSET *st_ext_benefit_mem;
+ RRDDIM *rd_ext_benefit_mem;
+
+ RRDSET *st_pending_mem_grant;
+ RRDDIM *rd_pending_mem_grant;
+
+ RRDSET *st_mem_tot_server;
+ RRDDIM *rd_mem_tot_server;
+
+ COUNTER_DATA MSSQLAccessMethodPageSplits;
+ COUNTER_DATA MSSQLBufferCacheHits;
+ COUNTER_DATA MSSQLBufferCheckpointPages;
+ COUNTER_DATA MSSQLBufferPageLifeExpectancy;
+ COUNTER_DATA MSSQLBufferPageReads;
+ COUNTER_DATA MSSQLBufferPageWrites;
+ COUNTER_DATA MSSQLBlockedProcesses;
+ COUNTER_DATA MSSQLUserConnections;
+ COUNTER_DATA MSSQLConnectionMemoryBytes;
+ COUNTER_DATA MSSQLExternalBenefitOfMemory;
+ COUNTER_DATA MSSQLPendingMemoryGrants;
+ COUNTER_DATA MSSQLSQLErrorsTotal;
+ COUNTER_DATA MSSQLTotalServerMemory;
+ COUNTER_DATA MSSQLStatsAutoParameterization;
+ COUNTER_DATA MSSQLStatsBatchRequests;
+ COUNTER_DATA MSSQLStatSafeAutoParameterization;
+ COUNTER_DATA MSSQLCompilations;
+ COUNTER_DATA MSSQLRecompilations;
+};
+
+enum lock_instance_idx {
+ NETDATA_MSSQL_ENUM_MLI_IDX_WAIT,
+ NETDATA_MSSQL_ENUM_MLI_IDX_DEAD_LOCKS,
+
+ NETDATA_MSSQL_ENUM_MLI_IDX_END
+};
+
+struct mssql_lock_instance {
+ struct mssql_instance *parent;
+
+ COUNTER_DATA lockWait;
+ COUNTER_DATA deadLocks;
+
+ RRDDIM *rd_lockWait;
+ RRDDIM *rd_deadLocks;
+
+ uint32_t updated;
+};
+
+enum db_instance_idx {
+ NETDATA_MSSQL_ENUM_MDI_IDX_FILE_SIZE,
+ NETDATA_MSSQL_ENUM_MDI_IDX_ACTIVE_TRANSACTIONS,
+ NETDATA_MSSQL_ENUM_MDI_IDX_BACKUP_RESTORE_OP,
+ NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHED,
+ NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHES,
+ NETDATA_MSSQL_ENUM_MDI_IDX_TRANSACTIONS,
+ NETDATA_MSSQL_ENUM_MDI_IDX_WRITE_TRANSACTIONS,
+
+ NETDATA_MSSQL_ENUM_MDI_IDX_END
+};
+
+struct mssql_db_instance {
+ struct mssql_instance *parent;
+
+ RRDSET *st_db_data_file_size;
+ RRDSET *st_db_active_transactions;
+ RRDSET *st_db_backup_restore_operations;
+ RRDSET *st_db_log_flushed;
+ RRDSET *st_db_log_flushes;
+ RRDSET *st_db_transactions;
+ RRDSET *st_db_write_transactions;
+
+ RRDDIM *rd_db_data_file_size;
+ RRDDIM *rd_db_active_transactions;
+ RRDDIM *rd_db_backup_restore_operations;
+ RRDDIM *rd_db_log_flushed;
+ RRDDIM *rd_db_log_flushes;
+ RRDDIM *rd_db_transactions;
+ RRDDIM *rd_db_write_transactions;
+
+ COUNTER_DATA MSSQLDatabaseActiveTransactions;
+ COUNTER_DATA MSSQLDatabaseBackupRestoreOperations;
+ COUNTER_DATA MSSQLDatabaseDataFileSize;
+ COUNTER_DATA MSSQLDatabaseLogFlushed;
+ COUNTER_DATA MSSQLDatabaseLogFlushes;
+ COUNTER_DATA MSSQLDatabaseTransactions;
+ COUNTER_DATA MSSQLDatabaseWriteTransactions;
+
+ uint32_t updated;
+};
+
+static DICTIONARY *mssql_instances = NULL;
+
+static void initialize_mssql_objects(struct mssql_instance *p, const char *instance) {
+ char prefix[NETDATA_MAX_INSTANCE_NAME];
+ if (!strcmp(instance, "MSSQLSERVER")) {
+ strncpyz(prefix, "SQLServer:", sizeof(prefix) - 1);
+ } else if (!strcmp(instance, "SQLEXPRESS")) {
+ strncpyz(prefix, "MSSQL$SQLEXPRESS:", sizeof(prefix) - 1);
+ } else {
+ char *express = (!is_sqlexpress) ? "" : "SQLEXPRESS";
+ snprintfz(prefix, sizeof(prefix) - 1, "MSSQL$%s:%s:", express, instance);
+ }
+
+ size_t length = strlen(prefix);
+ char name[NETDATA_MAX_INSTANCE_OBJECT];
+ snprintfz(name, sizeof(name) - 1, "%s%s", prefix, "General Statistics");
+ p->objectName[NETDATA_MSSQL_GENERAL_STATS] = strdup(name);
+
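+ // The remaining object names reuse the instance prefix already stored in name[]; only the suffix at offset 'length' is rewritten.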
+ strncpyz(&name[length], "SQL Errors", sizeof(name) - length);
+ p->objectName[NETDATA_MSSQL_SQL_ERRORS] = strdup(name);
+
+ strncpyz(&name[length], "Databases", sizeof(name) - length);
+ p->objectName[NETDATA_MSSQL_DATABASE] = strdup(name);
+
+ strncpyz(&name[length], "SQL Statistics", sizeof(name) - length);
+ p->objectName[NETDATA_MSSQL_SQL_STATS] = strdup(name);
+
+ strncpyz(&name[length], "Buffer Manager", sizeof(name) - length);
+ p->objectName[NETDATA_MSSQL_BUFFER_MANAGEMENT] = strdup(name);
+
+ strncpyz(&name[length], "Memory Manager", sizeof(name) - length);
+ p->objectName[NETDATA_MSSQL_MEMORY] = strdup(name);
+
+ strncpyz(&name[length], "Locks", sizeof(name) - length);
+ p->objectName[NETDATA_MSSQL_LOCKS] = strdup(name);
+
+ strncpyz(&name[length], "Access Methods", sizeof(name) - length);
+ p->objectName[NETDATA_MSSQL_ACCESS_METHODS] = strdup(name);
+
+ p->instanceID = strdup(instance);
+}
+
+static inline void initialize_mssql_keys(struct mssql_instance *p) {
+ // General Statistics
+ p->MSSQLUserConnections.key = "User Connections";
+ p->MSSQLBlockedProcesses.key = "Processes blocked";
+
+ // SQL Statistics
+ p->MSSQLStatsAutoParameterization.key = "Auto-Param Attempts/sec";
+ p->MSSQLStatsBatchRequests.key = "Batch Requests/sec";
+ p->MSSQLStatSafeAutoParameterization.key = "Safe Auto-Params/sec";
+ p->MSSQLCompilations.key = "SQL Compilations/sec";
+ p->MSSQLRecompilations.key = "SQL Re-Compilations/sec";
+
+ // Buffer Management
+ p->MSSQLBufferCacheHits.key = "Buffer cache hit ratio";
+ p->MSSQLBufferPageLifeExpectancy.key = "Page life expectancy";
+ p->MSSQLBufferCheckpointPages.key = "Checkpoint pages/sec";
+ p->MSSQLBufferPageReads.key = "Page reads/sec";
+ p->MSSQLBufferPageWrites.key = "Page writes/sec";
+
+ // Access Methods
+ p->MSSQLAccessMethodPageSplits.key = "Page Splits/sec";
+
+ // Errors
+ p->MSSQLSQLErrorsTotal.key = "Errors/sec";
+
+ p->MSSQLConnectionMemoryBytes.key = "Connection Memory (KB)";
+ p->MSSQLExternalBenefitOfMemory.key = "External benefit of memory";
+ p->MSSQLPendingMemoryGrants.key = "Memory Grants Pending";
+ p->MSSQLTotalServerMemory.key = "Total Server Memory (KB)";
+}
+
+void dict_mssql_insert_locks_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct mssql_lock_instance *ptr = value;
+ ptr->deadLocks.key = "Number of Deadlocks/sec";
+ ptr->lockWait.key = "Lock Waits/sec";
+}
+
+void dict_mssql_insert_databases_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct mssql_db_instance *ptr = value;
+
+ ptr->MSSQLDatabaseDataFileSize.key = "Data File(s) Size (KB)";
+ ptr->MSSQLDatabaseActiveTransactions.key = "Active Transactions";
+ ptr->MSSQLDatabaseBackupRestoreOperations.key = "Backup/Restore Throughput/sec";
+ ptr->MSSQLDatabaseLogFlushed.key = "Log Bytes Flushed/sec";
+ ptr->MSSQLDatabaseLogFlushes.key = "Log Flushes/sec";
+ ptr->MSSQLDatabaseTransactions.key = "Transactions/sec";
+ ptr->MSSQLDatabaseWriteTransactions.key = "Write Transactions/sec";
+}
+
+void dict_mssql_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct mssql_instance *p = value;
+ const char *instance = dictionary_acquired_item_name((DICTIONARY_ITEM *)item);
+
+ if (!p->locks_instances) {
+ p->locks_instances = dictionary_create_advanced(
+ DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct mssql_lock_instance));
+ dictionary_register_insert_callback(p->locks_instances, dict_mssql_insert_locks_cb, NULL);
+ }
+
+ if (!p->databases) {
+ p->databases = dictionary_create_advanced(
+ DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct mssql_db_instance));
+ dictionary_register_insert_callback(p->databases, dict_mssql_insert_databases_cb, NULL);
+ }
+
+ initialize_mssql_objects(p, instance);
+ initialize_mssql_keys(p);
+}
+
+static int mssql_fill_dictionary() {
+ HKEY hKey;
+ LSTATUS ret = RegOpenKeyExA(
+ HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Microsoft SQL Server\\Instance Names\\SQL", 0, KEY_READ, &hKey);
+ if (ret != ERROR_SUCCESS)
+ return -1;
+
+ DWORD values = 0;
+
+ ret = RegQueryInfoKey(hKey, NULL, NULL, NULL, NULL, NULL, NULL, &values, NULL, NULL, NULL, NULL);
+ if (ret != ERROR_SUCCESS) {
+ goto endMSSQLFillDict;
+ }
+
+ if (!values) {
+ ret = ERROR_PATH_NOT_FOUND;
+ goto endMSSQLFillDict;
+ }
+
+// https://learn.microsoft.com/en-us/windows/win32/sysinfo/enumerating-registry-subkeys
+#define REGISTRY_MAX_VALUE 16383
+
+ DWORD i;
+ char avalue[REGISTRY_MAX_VALUE] = {'\0'};
+ DWORD length = REGISTRY_MAX_VALUE;
+ for (i = 0; i < values; i++) {
+ avalue[0] = '\0';
+ // RegEnumValue updates 'length' on each call, so restore the full buffer size before every iteration.
+ length = REGISTRY_MAX_VALUE;
+
+ ret = RegEnumValue(hKey, i, avalue, &length, NULL, NULL, NULL, NULL);
+ if (ret != ERROR_SUCCESS)
+ continue;
+
+ if (!strcmp(avalue, "SQLEXPRESS")) {
+ is_sqlexpress = TRUE;
+ }
+
+ struct mssql_instance *p = dictionary_set(mssql_instances, avalue, NULL, sizeof(*p));
+ }
+
+endMSSQLFillDict:
+ RegCloseKey(hKey);
+
+ return (ret == ERROR_SUCCESS) ? 0 : -1;
+}
+
+static int initialize(void) {
+ mssql_instances = dictionary_create_advanced(
+ DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct mssql_instance));
+
+ dictionary_register_insert_callback(mssql_instances, dict_mssql_insert_cb, NULL);
+
+ if (mssql_fill_dictionary()) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static void do_mssql_general_stats(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_GENERAL_STATS]);
+ if (!pObjectType)
+ return;
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLUserConnections)) {
+ if (!p->st_user_connections) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_user_connections", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_user_connections = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "connections",
+ "mssql.instance_user_connections",
+ "User connections",
+ "connections",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_USER_CONNECTIONS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_user_connections = rrddim_add(p->st_user_connections, "user", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_user_connections->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_user_connections, p->rd_user_connections, (collected_number)p->MSSQLUserConnections.current.Data);
+ rrdset_done(p->st_user_connections);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBlockedProcesses)) {
+ if (!p->st_process_blocked) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_blocked_process", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_process_blocked = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "processes",
+ "mssql.instance_blocked_processes",
+ "Blocked processes",
+ "process",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_BLOCKED_PROCESSES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_process_blocked = rrddim_add(p->st_process_blocked, "blocked", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_process_blocked->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_process_blocked, p->rd_process_blocked, (collected_number)p->MSSQLBlockedProcesses.current.Data);
+ rrdset_done(p->st_process_blocked);
+ }
+}
+
+static void do_mssql_sql_statistics(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_SQL_STATS]);
+ if (!pObjectType)
+ return;
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLStatsAutoParameterization)) {
+ if (!p->st_stats_auto_param) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_auto_parameterization_attempts", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_stats_auto_param = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "sql activity",
+ "mssql.instance_sqlstats_auto_parameterization_attempts",
+ "Failed auto-parameterization attempts",
+ "attempts/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_STATS_AUTO_PARAMETRIZATION,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_stats_auto_param =
+ rrddim_add(p->st_stats_auto_param, "failed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_stats_auto_param->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_stats_auto_param,
+ p->rd_stats_auto_param,
+ (collected_number)p->MSSQLStatsAutoParameterization.current.Data);
+ rrdset_done(p->st_stats_auto_param);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLStatsBatchRequests)) {
+ if (!p->st_stats_batch_request) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_batch_requests", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_stats_batch_request = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "sql activity",
+ "mssql.instance_sqlstats_batch_requests",
+ "Total of batches requests",
+ "requests/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_STATS_BATCH_REQUEST,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_stats_batch_request =
+ rrddim_add(p->st_stats_batch_request, "batch", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_stats_batch_request->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_stats_batch_request,
+ p->rd_stats_batch_request,
+ (collected_number)p->MSSQLStatsBatchRequests.current.Data);
+ rrdset_done(p->st_stats_batch_request);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLStatSafeAutoParameterization)) {
+ if (!p->st_stats_safe_auto) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_safe_auto_parameterization_attempts", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_stats_safe_auto = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "sql activity",
+ "mssql.instance_sqlstats_safe_auto_parameterization_attempts",
+ "Safe auto-parameterization attempts",
+ "attempts/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_STATS_SAFE_AUTO_PARAMETRIZATION,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_stats_safe_auto = rrddim_add(p->st_stats_safe_auto, "safe", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_stats_safe_auto->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_stats_safe_auto,
+ p->rd_stats_safe_auto,
+ (collected_number)p->MSSQLStatSafeAutoParameterization.current.Data);
+ rrdset_done(p->st_stats_safe_auto);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLCompilations)) {
+ if (!p->st_stats_compilation) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_sql_compilations", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_stats_compilation = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "sql activity",
+ "mssql.instance_sqlstats_sql_compilations",
+ "SQL compilations",
+ "compilations/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_STATS_COMPILATIONS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_stats_compilation =
+ rrddim_add(p->st_stats_compilation, "compilations", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_stats_compilation->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_stats_compilation, p->rd_stats_compilation, (collected_number)p->MSSQLCompilations.current.Data);
+ rrdset_done(p->st_stats_compilation);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLRecompilations)) {
+ if (!p->st_stats_recompiles) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sqlstats_sql_recompilations", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_stats_recompiles = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "sql activity",
+ "mssql.instance_sqlstats_sql_recompilations",
+ "SQL re-compilations",
+ "recompiles/",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_STATS_RECOMPILATIONS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_stats_recompiles =
+ rrddim_add(p->st_stats_recompiles, "recompiles", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_stats_recompiles->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_stats_recompiles, p->rd_stats_recompiles, (collected_number)p->MSSQLRecompilations.current.Data);
+ rrdset_done(p->st_stats_recompiles);
+ }
+}
+
+static void do_mssql_buffer_management(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ PERF_OBJECT_TYPE *pObjectType =
+ perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_BUFFER_MANAGEMENT]);
+ if (!pObjectType)
+ return;
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferCacheHits)) {
+ if (!p->st_buff_cache_hits) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_cache_hit_ratio", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_buff_cache_hits = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "buffer cache",
+ "mssql.instance_cache_hit_ratio",
+ "Buffer Cache hit ratio",
+ "percentage",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_BUFF_CACHE_HIT_RATIO,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_buff_cache_hits = rrddim_add(p->st_buff_cache_hits, "hit_ratio", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_buff_cache_hits->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_buff_cache_hits, p->rd_buff_cache_hits, (collected_number)p->MSSQLBufferCacheHits.current.Data);
+ rrdset_done(p->st_buff_cache_hits);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferCheckpointPages)) {
+ if (!p->st_buff_checkpoint_pages) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_bufman_checkpoint_pages", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_buff_checkpoint_pages = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "buffer cache",
+ "mssql.instance_bufman_checkpoint_pages",
+ "Flushed pages",
+ "pages/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_BUFF_CHECKPOINT_PAGES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_buff_checkpoint_pages =
+ rrddim_add(p->st_buff_checkpoint_pages, "log", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_buff_checkpoint_pages->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_buff_checkpoint_pages,
+ p->rd_buff_checkpoint_pages,
+ (collected_number)p->MSSQLBufferCheckpointPages.current.Data);
+ rrdset_done(p->st_buff_checkpoint_pages);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferPageLifeExpectancy)) {
+ if (!p->st_buff_cache_page_life_expectancy) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_bufman_page_life_expectancy", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_buff_cache_page_life_expectancy = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "buffer cache",
+ "mssql.instance_bufman_page_life_expectancy",
+ "Page life expectancy",
+ "seconds",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_BUFF_PAGE_LIFE_EXPECTANCY,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_buff_cache_page_life_expectancy = rrddim_add(
+ p->st_buff_cache_page_life_expectancy, "life_expectancy", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(
+ p->st_buff_cache_page_life_expectancy->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_buff_cache_page_life_expectancy,
+ p->rd_buff_cache_page_life_expectancy,
+ (collected_number)p->MSSQLBufferPageLifeExpectancy.current.Data);
+ rrdset_done(p->st_buff_cache_page_life_expectancy);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferPageReads) &&
+ perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLBufferPageWrites)) {
+ if (!p->st_buff_page_iops) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_bufman_iops", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_buff_page_iops = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "buffer cache",
+ "mssql.instance_bufman_iops",
+ "Number of pages input and output",
+ "pages/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_BUFF_MAN_IOPS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_buff_page_reads = rrddim_add(p->st_buff_page_iops, "read", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_buff_page_writes =
+ rrddim_add(p->st_buff_page_iops, "written", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_buff_page_iops->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_buff_page_iops, p->rd_buff_page_reads, (collected_number)p->MSSQLBufferPageReads.current.Data);
+ rrddim_set_by_pointer(
+ p->st_buff_page_iops, p->rd_buff_page_writes, (collected_number)p->MSSQLBufferPageWrites.current.Data);
+
+ rrdset_done(p->st_buff_page_iops);
+ }
+}
+
+static void do_mssql_access_methods(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ PERF_OBJECT_TYPE *pObjectType =
+ perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_ACCESS_METHODS]);
+ if (!pObjectType)
+ return;
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLAccessMethodPageSplits)) {
+ if (!p->st_access_method_page_splits) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_accessmethods_page_splits", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_access_method_page_splits = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "buffer cache",
+ "mssql.instance_accessmethods_page_splits",
+ "Page splits",
+ "splits/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_BUFF_METHODS_PAGE_SPLIT,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_access_method_page_splits =
+ rrddim_add(p->st_access_method_page_splits, "page", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_access_method_page_splits->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_access_method_page_splits,
+ p->rd_access_method_page_splits,
+ (collected_number)p->MSSQLAccessMethodPageSplits.current.Data);
+ rrdset_done(p->st_access_method_page_splits);
+ }
+}
+
+static void do_mssql_errors(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_SQL_ERRORS]);
+ if (!pObjectType)
+ return;
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLSQLErrorsTotal)) {
+ if (!p->st_sql_errors) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_sql_errors_total", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_sql_errors = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "errors",
+ "mssql.instance_sql_errors",
+ "Errors",
+ "errors/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_SQL_ERRORS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_sql_errors = rrddim_add(p->st_sql_errors, "errors", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_sql_errors->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_sql_errors, p->rd_sql_errors, (collected_number)p->MSSQLSQLErrorsTotal.current.Data);
+ rrdset_done(p->st_sql_errors);
+ }
+}
+
+int dict_mssql_locks_charts_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ struct mssql_lock_instance *mli = value;
+ const char *instance = dictionary_acquired_item_name((DICTIONARY_ITEM *)item);
+
+ int *update_every = data;
+
+ if (!mli->parent->st_lockWait) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_locks_lock_wait", mli->parent->instanceID);
+ netdata_fix_chart_name(id);
+ mli->parent->st_lockWait = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "locks",
+ "mssql.instance_locks_lock_wait",
+ "Lock requests that required the caller to wait.",
+ "locks/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_LOCKS_WAIT,
+ *update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdlabels_add(
+ mli->parent->st_lockWait->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!mli->rd_lockWait) {
+ mli->rd_lockWait = rrddim_add(mli->parent->st_lockWait, instance, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MLI_IDX_WAIT)) {
+ rrddim_set_by_pointer(
+ mli->parent->st_lockWait, mli->rd_lockWait, (collected_number)(mli->lockWait.current.Data));
+ }
+
+ if (!mli->parent->st_deadLocks) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_locks_deadlocks", mli->parent->instanceID);
+ netdata_fix_chart_name(id);
+ mli->parent->st_deadLocks = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "locks",
+ "mssql.instance_locks_deadlocks",
+ "Lock requests that resulted in deadlock.",
+ "deadlocks/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_LOCKS_DEADLOCK,
+ *update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdlabels_add(
+ mli->parent->st_deadLocks->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!mli->rd_deadLocks) {
+ mli->rd_deadLocks = rrddim_add(mli->parent->st_deadLocks, instance, NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MLI_IDX_DEAD_LOCKS)) {
+ rrddim_set_by_pointer(
+ mli->parent->st_deadLocks, mli->rd_deadLocks, (collected_number)mli->deadLocks.current.Data);
+ }
+
+ return 1;
+}
+
+static void do_mssql_locks(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) {
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_LOCKS]);
+ if (!pObjectType)
+ return;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ if (!strcasecmp(windows_shared_buffer, "_Total"))
+ continue;
+
+ struct mssql_lock_instance *mli = dictionary_set(p->locks_instances, windows_shared_buffer, NULL, sizeof(*mli));
+ if (!mli)
+ continue;
+
+ if (!mli->parent) {
+ mli->parent = p;
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &mli->lockWait))
+ mli->updated |= (1 << NETDATA_MSSQL_ENUM_MLI_IDX_WAIT);
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &mli->deadLocks))
+ mli->updated |= (1 << NETDATA_MSSQL_ENUM_MLI_IDX_DEAD_LOCKS);
+ }
+
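+ // Add/refresh one dimension per lock resource type on the shared charts, then flush both charts once per cycle.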
+ dictionary_sorted_walkthrough_read(p->locks_instances, dict_mssql_locks_charts_cb, &update_every);
+ rrdset_done(p->st_lockWait);
+ rrdset_done(p->st_deadLocks);
+}
+
+static void mssql_database_backup_restore_chart(struct mssql_db_instance *mli, const char *db, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ if (!mli->st_db_backup_restore_operations) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_backup_restore_operations", db, mli->parent->instanceID);
+ netdata_fix_chart_name(id);
+ mli->st_db_backup_restore_operations = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "transactions",
+ "mssql.database_backup_restore_operations",
+ "Backup IO per database",
+ "operations/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_DATABASE_BACKUP_RESTORE_OPERATIONS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdlabels_add(
+ mli->st_db_backup_restore_operations->rrdlabels,
+ "mssql_instance",
+ mli->parent->instanceID,
+ RRDLABEL_SRC_AUTO);
+ rrdlabels_add(mli->st_db_backup_restore_operations->rrdlabels, "database", db, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!mli->rd_db_backup_restore_operations) {
+ mli->rd_db_backup_restore_operations =
+ rrddim_add(mli->st_db_backup_restore_operations, "backup", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_BACKUP_RESTORE_OP)) {
+ rrddim_set_by_pointer(
+ mli->st_db_backup_restore_operations,
+ mli->rd_db_backup_restore_operations,
+ (collected_number)mli->MSSQLDatabaseBackupRestoreOperations.current.Data);
+ }
+
+ rrdset_done(mli->st_db_backup_restore_operations);
+}
+
+static void mssql_database_log_flushes_chart(struct mssql_db_instance *mli, const char *db, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ if (!mli->st_db_log_flushes) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_log_flushes", db, mli->parent->instanceID);
+ netdata_fix_chart_name(id);
+ mli->st_db_log_flushes = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "transactions",
+ "mssql.database_log_flushes",
+ "Log flushes",
+ "flushes/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_DATABASE_LOG_FLUSHES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdlabels_add(mli->st_db_log_flushes->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(mli->st_db_log_flushes->rrdlabels, "database", db, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!mli->rd_db_log_flushes) {
+ mli->rd_db_log_flushes = rrddim_add(mli->st_db_log_flushes, "flushes", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHES)) {
+ rrddim_set_by_pointer(
+ mli->st_db_log_flushes,
+ mli->rd_db_log_flushes,
+ (collected_number)mli->MSSQLDatabaseLogFlushes.current.Data);
+ }
+
+ rrdset_done(mli->st_db_log_flushes);
+}
+
+static void mssql_database_log_flushed_chart(struct mssql_db_instance *mli, const char *db, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ if (!mli->st_db_log_flushed) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_log_flushed", db, mli->parent->instanceID);
+ netdata_fix_chart_name(id);
+ mli->st_db_log_flushed = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "transactions",
+ "mssql.database_log_flushed",
+ "Log flushed",
+ "bytes/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_DATABASE_LOG_FLUSHED,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdlabels_add(mli->st_db_log_flushed->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(mli->st_db_log_flushed->rrdlabels, "database", db, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!mli->rd_db_log_flushed) {
+ mli->rd_db_log_flushed = rrddim_add(mli->st_db_log_flushed, "flushed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHED)) {
+ rrddim_set_by_pointer(
+ mli->st_db_log_flushed,
+ mli->rd_db_log_flushed,
+ (collected_number)mli->MSSQLDatabaseLogFlushed.current.Data);
+ }
+
+ rrdset_done(mli->st_db_log_flushed);
+}
+
+static void mssql_transactions_chart(struct mssql_db_instance *mli, const char *db, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ if (!mli->st_db_transactions) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_transactions", db, mli->parent->instanceID);
+ netdata_fix_chart_name(id);
+ mli->st_db_transactions = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "transactions",
+ "mssql.database_transactions",
+ "Transactions",
+ "transactions/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_DATABASE_TRANSACTIONS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdlabels_add(mli->st_db_transactions->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(mli->st_db_transactions->rrdlabels, "database", db, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!mli->rd_db_transactions) {
+ mli->rd_db_transactions =
+ rrddim_add(mli->st_db_transactions, "transactions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_TRANSACTIONS)) {
+ rrddim_set_by_pointer(
+ mli->st_db_transactions,
+ mli->rd_db_transactions,
+ (collected_number)mli->MSSQLDatabaseTransactions.current.Data);
+ }
+
+ rrdset_done(mli->st_db_transactions);
+}
+
+static void mssql_write_transactions_chart(struct mssql_db_instance *mli, const char *db, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ if (!mli->st_db_write_transactions) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_write_transactions", db, mli->parent->instanceID);
+ netdata_fix_chart_name(id);
+ mli->st_db_write_transactions = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "transactions",
+ "mssql.database_write_transactions",
+ "Write transactions",
+ "transactions/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_DATABASE_WRITE_TRANSACTIONS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdlabels_add(
+ mli->st_db_write_transactions->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(mli->st_db_write_transactions->rrdlabels, "database", db, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!mli->rd_db_write_transactions) {
+ mli->rd_db_write_transactions =
+ rrddim_add(mli->st_db_write_transactions, "write", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_WRITE_TRANSACTIONS)) {
+ rrddim_set_by_pointer(
+ mli->st_db_write_transactions,
+ mli->rd_db_write_transactions,
+ (collected_number)mli->MSSQLDatabaseWriteTransactions.current.Data);
+ }
+
+ rrdset_done(mli->st_db_write_transactions);
+}
+
+static void mssql_active_transactions_chart(struct mssql_db_instance *mli, const char *db, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ if (!mli->st_db_active_transactions) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_active_transactions", db, mli->parent->instanceID);
+ netdata_fix_chart_name(id);
+ mli->st_db_active_transactions = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "transactions",
+ "mssql.database_active_transactions",
+ "Active transactions per database",
+ "transactions",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_DATABASE_ACTIVE_TRANSACTIONS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdlabels_add(
+ mli->st_db_active_transactions->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(mli->st_db_active_transactions->rrdlabels, "database", db, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!mli->rd_db_active_transactions) {
+ mli->rd_db_active_transactions =
+ rrddim_add(mli->st_db_active_transactions, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ if (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_ACTIVE_TRANSACTIONS)) {
+ rrddim_set_by_pointer(
+ mli->st_db_active_transactions,
+ mli->rd_db_active_transactions,
+ (collected_number)mli->MSSQLDatabaseActiveTransactions.current.Data);
+ }
+
+ rrdset_done(mli->st_db_active_transactions);
+}
+
+static void mssql_data_file_size_chart(struct mssql_db_instance *mli, const char *db, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ if (!mli->st_db_data_file_size) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "db_%s_instance_%s_data_files_size", db, mli->parent->instanceID);
+ netdata_fix_chart_name(id);
+ mli->st_db_data_file_size = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "size",
+ "mssql.database_data_files_size",
+ "Current database size.",
+ "bytes",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_DATABASE_DATA_FILE_SIZE,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ rrdlabels_add(
+ mli->st_db_data_file_size->rrdlabels, "mssql_instance", mli->parent->instanceID, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(mli->st_db_data_file_size->rrdlabels, "database", db, RRDLABEL_SRC_AUTO);
+ }
+
+ if (!mli->rd_db_data_file_size) {
+ mli->rd_db_data_file_size = rrddim_add(mli->st_db_data_file_size, "size", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ // FIXME: If the value cannot be retrieved, remove the chart instead of displaying a 0 value.
+ collected_number data =
+ (mli->updated & (1 << NETDATA_MSSQL_ENUM_MDI_IDX_FILE_SIZE)) ? mli->MSSQLDatabaseDataFileSize.current.Data : 0;
+ rrddim_set_by_pointer(mli->st_db_data_file_size, mli->rd_db_data_file_size, data);
+
+ rrdset_done(mli->st_db_data_file_size);
+}
+
+int dict_mssql_databases_charts_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct mssql_db_instance *mli = value;
+ const char *db = dictionary_acquired_item_name((DICTIONARY_ITEM *)item);
+
+ int *update_every = data;
+
+ void (*transaction_chart[])(struct mssql_db_instance *, const char *, int) = {
+        // FIXME: Netdata allegedly collects negative values here (MSSQLDatabaseDataFileSize).
+        // Something is wrong; perflibdump shows the correct values.
+ // mssql_data_file_size_chart,
+ mssql_transactions_chart,
+ mssql_database_backup_restore_chart,
+ mssql_database_log_flushed_chart,
+ mssql_database_log_flushes_chart,
+ mssql_active_transactions_chart,
+ mssql_write_transactions_chart,
+
+ // Last function pointer must be NULL
+ NULL};
+
+ int i;
+ for (i = 0; transaction_chart[i]; i++) {
+ transaction_chart[i](mli, db, *update_every);
+ }
+
+ return 1;
+}
+
+static void do_mssql_databases(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) {
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_DATABASE]);
+ if (!pObjectType)
+ return;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ if (!strcasecmp(windows_shared_buffer, "_Total"))
+ continue;
+
+ struct mssql_db_instance *mdi = dictionary_set(p->databases, windows_shared_buffer, NULL, sizeof(*mdi));
+ if (!mdi)
+ continue;
+
+ mdi->updated = 0;
+ if (!mdi->parent) {
+ mdi->parent = p;
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseDataFileSize)) {
+ LONGLONG value = (LONGLONG)mdi->MSSQLDatabaseDataFileSize.current.Data;
+ if (value > 0)
+ mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_FILE_SIZE);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseActiveTransactions))
+ mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_ACTIVE_TRANSACTIONS);
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseBackupRestoreOperations))
+ mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_BACKUP_RESTORE_OP);
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseLogFlushed))
+ mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHED);
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseLogFlushes))
+ mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_LOG_FLUSHES);
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseTransactions))
+ mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_TRANSACTIONS);
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &mdi->MSSQLDatabaseWriteTransactions))
+ mdi->updated |= (1 << NETDATA_MSSQL_ENUM_MDI_IDX_WRITE_TRANSACTIONS);
+ }
+
+ dictionary_sorted_walkthrough_read(p->databases, dict_mssql_databases_charts_cb, &update_every);
+}
+
+static void do_mssql_memory_mgr(PERF_DATA_BLOCK *pDataBlock, struct mssql_instance *p, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->objectName[NETDATA_MSSQL_MEMORY]);
+ if (!pObjectType)
+ return;
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLConnectionMemoryBytes)) {
+ if (!p->st_conn_memory) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_memmgr_connection_memory_bytes", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_conn_memory = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "memory",
+ "mssql.instance_memmgr_connection_memory_bytes",
+ "Amount of dynamic memory to maintain connections",
+ "bytes",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_MEMMGR_CONNECTION_MEMORY_BYTES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_conn_memory = rrddim_add(p->st_conn_memory, "memory", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_conn_memory->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_conn_memory,
+ p->rd_conn_memory,
+ (collected_number)(p->MSSQLConnectionMemoryBytes.current.Data * 1024));
+ rrdset_done(p->st_conn_memory);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLExternalBenefitOfMemory)) {
+ if (!p->st_ext_benefit_mem) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_memmgr_external_benefit_of_memory", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_ext_benefit_mem = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "memory",
+ "mssql.instance_memmgr_external_benefit_of_memory",
+ "Performance benefit from adding memory to a specific cache",
+ "bytes",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_MEMMGR_EXTERNAL_BENEFIT_OF_MEMORY,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_ext_benefit_mem = rrddim_add(p->st_ext_benefit_mem, "benefit", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_ext_benefit_mem->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_ext_benefit_mem,
+ p->rd_ext_benefit_mem,
+ (collected_number)p->MSSQLExternalBenefitOfMemory.current.Data);
+ rrdset_done(p->st_ext_benefit_mem);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLPendingMemoryGrants)) {
+ if (!p->st_pending_mem_grant) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_memmgr_pending_memory_grants", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_pending_mem_grant = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "memory",
+ "mssql.instance_memmgr_pending_memory_grants",
+ "Process waiting for memory grant",
+ "processes",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_MEMMGR_PENDING_MEMORY_GRANTS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_pending_mem_grant =
+ rrddim_add(p->st_pending_mem_grant, "pending", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_pending_mem_grant->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_pending_mem_grant,
+ p->rd_pending_mem_grant,
+ (collected_number)p->MSSQLPendingMemoryGrants.current.Data);
+
+ rrdset_done(p->st_pending_mem_grant);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->MSSQLTotalServerMemory)) {
+ if (!p->st_mem_tot_server) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "instance_%s_memmgr_server_memory", p->instanceID);
+ netdata_fix_chart_name(id);
+ p->st_mem_tot_server = rrdset_create_localhost(
+ "mssql",
+ id,
+ NULL,
+ "memory",
+ "mssql.instance_memmgr_server_memory",
+ "Memory committed",
+ "bytes",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibMSSQL",
+ PRIO_MSSQL_MEMMGR_TOTAL_SERVER,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_mem_tot_server = rrddim_add(p->st_mem_tot_server, "memory", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_mem_tot_server->rrdlabels, "mssql_instance", p->instanceID, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_mem_tot_server,
+ p->rd_mem_tot_server,
+ (collected_number)(p->MSSQLTotalServerMemory.current.Data * 1024));
+
+ rrdset_done(p->st_mem_tot_server);
+ }
+}
+
+int dict_mssql_charts_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct mssql_instance *p = value;
+ int *update_every = data;
+
+ static void (*doMSSQL[])(PERF_DATA_BLOCK *, struct mssql_instance *, int) = {
+ do_mssql_general_stats,
+ do_mssql_errors,
+ do_mssql_databases,
+ do_mssql_locks,
+ do_mssql_memory_mgr,
+ do_mssql_buffer_management,
+ do_mssql_sql_statistics,
+ do_mssql_access_methods};
+
+ DWORD i;
+ for (i = 0; i < NETDATA_MSSQL_METRICS_END; i++) {
+ if (!doMSSQL[i])
+ continue;
+
+ DWORD id = RegistryFindIDByName(p->objectName[i]);
+ if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ return -1;
+
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if (!pDataBlock)
+ return -1;
+
+ doMSSQL[i](pDataBlock, p, *update_every);
+ }
+
+ return 1;
+}
+
+int do_PerflibMSSQL(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if (unlikely(!initialized)) {
+ if (initialize())
+ return -1;
+
+ initialized = true;
+ }
+
+ dictionary_sorted_walkthrough_read(mssql_instances, dict_mssql_charts_cb, &update_every);
+
+ return 0;
+}
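
The MSSQL collector above drives all per-database charts through a NULL-terminated array of callbacks that is walked for every dictionary entry, and do_PerflibMSSQL() gates the whole collection behind a one-time initialize(). The following is a minimal, self-contained sketch of that dispatch pattern; struct db_instance, chart_fn and the two callbacks are hypothetical stand-ins for illustration only, not the Netdata API.

/*
 * Minimal sketch of the NULL-terminated callback dispatch used by
 * dict_mssql_databases_charts_cb() above. All names are illustrative.
 */
#include <stddef.h>
#include <stdio.h>

struct db_instance {
    const char *name;
    long long transactions;
    long long write_transactions;
};

/* each callback renders one "chart" for one database */
typedef void (*chart_fn)(const struct db_instance *db);

static void transactions_chart(const struct db_instance *db) {
    printf("%s: transactions=%lld\n", db->name, db->transactions);
}

static void write_transactions_chart(const struct db_instance *db) {
    printf("%s: write_transactions=%lld\n", db->name, db->write_transactions);
}

int main(void) {
    struct db_instance dbs[] = {
        {"master", 120, 35},
        {"tempdb", 980, 410},
    };

    /* last entry must be NULL, exactly like transaction_chart[] above */
    chart_fn charts[] = {
        transactions_chart,
        write_transactions_chart,
        NULL,
    };

    for (size_t d = 0; d < sizeof(dbs) / sizeof(dbs[0]); d++)
        for (size_t i = 0; charts[i]; i++)
            charts[i](&dbs[d]);

    return 0;
}

Keeping the NULL terminator (as the comment in transaction_chart[] insists) lets a new chart be added by appending one function pointer without touching the loop.
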
diff --git a/src/collectors/windows.plugin/perflib-names.c b/src/collectors/windows.plugin/perflib-names.c
deleted file mode 100644
index 5b47cbce7..000000000
--- a/src/collectors/windows.plugin/perflib-names.c
+++ /dev/null
@@ -1,242 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "perflib.h"
-
-#define REGISTRY_KEY "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Perflib\\009"
-
-typedef struct perflib_registry {
- DWORD id;
- char *key;
- char *help;
-} perfLibRegistryEntry;
-
-static inline bool compare_perfLibRegistryEntry(const char *k1, const char *k2) {
- return strcmp(k1, k2) == 0;
-}
-
-static inline const char *value2key_perfLibRegistryEntry(perfLibRegistryEntry *entry) {
- return entry->key;
-}
-
-#define SIMPLE_HASHTABLE_COMPARE_KEYS_FUNCTION compare_perfLibRegistryEntry
-#define SIMPLE_HASHTABLE_VALUE2KEY_FUNCTION value2key_perfLibRegistryEntry
-#define SIMPLE_HASHTABLE_KEY_TYPE const char
-#define SIMPLE_HASHTABLE_VALUE_TYPE perfLibRegistryEntry
-#define SIMPLE_HASHTABLE_NAME _PERFLIB
-#include "libnetdata/simple_hashtable.h"
-
-static struct {
- SPINLOCK spinlock;
- size_t size;
- perfLibRegistryEntry **array;
- struct simple_hashtable_PERFLIB hashtable;
- FILETIME lastWriteTime;
-} names_globals = {
- .spinlock = NETDATA_SPINLOCK_INITIALIZER,
- .size = 0,
- .array = NULL,
-};
-
-DWORD RegistryFindIDByName(const char *name) {
- DWORD rc = PERFLIB_REGISTRY_NAME_NOT_FOUND;
-
- spinlock_lock(&names_globals.spinlock);
- XXH64_hash_t hash = XXH3_64bits((void *)name, strlen(name));
- SIMPLE_HASHTABLE_SLOT_PERFLIB *sl = simple_hashtable_get_slot_PERFLIB(&names_globals.hashtable, hash, name, false);
- perfLibRegistryEntry *e = SIMPLE_HASHTABLE_SLOT_DATA(sl);
- if(e) rc = e->id;
- spinlock_unlock(&names_globals.spinlock);
-
- return rc;
-}
-
-static inline void RegistryAddToHashTable_unsafe(perfLibRegistryEntry *entry) {
- XXH64_hash_t hash = XXH3_64bits((void *)entry->key, strlen(entry->key));
- SIMPLE_HASHTABLE_SLOT_PERFLIB *sl = simple_hashtable_get_slot_PERFLIB(&names_globals.hashtable, hash, entry->key, true);
- perfLibRegistryEntry *e = SIMPLE_HASHTABLE_SLOT_DATA(sl);
- if(!e || e->id > entry->id)
- simple_hashtable_set_slot_PERFLIB(&names_globals.hashtable, sl, hash, entry);
-}
-
-static void RegistrySetData_unsafe(DWORD id, const char *key, const char *help) {
- if(id >= names_globals.size) {
- // increase the size of the array
-
- size_t old_size = names_globals.size;
-
- if(!names_globals.size)
- names_globals.size = 20000;
- else
- names_globals.size *= 2;
-
- names_globals.array = reallocz(names_globals.array, names_globals.size * sizeof(perfLibRegistryEntry *));
-
- memset(names_globals.array + old_size, 0, (names_globals.size - old_size) * sizeof(perfLibRegistryEntry *));
- }
-
- perfLibRegistryEntry *entry = names_globals.array[id];
- if(!entry)
- entry = names_globals.array[id] = (perfLibRegistryEntry *)calloc(1, sizeof(perfLibRegistryEntry));
-
- bool add_to_hash = false;
- if(key && !entry->key) {
- entry->key = strdup(key);
- add_to_hash = true;
- }
-
- if(help && !entry->help)
- entry->help = strdup(help);
-
- entry->id = id;
-
- if(add_to_hash)
- RegistryAddToHashTable_unsafe(entry);
-}
-
-const char *RegistryFindNameByID(DWORD id) {
- const char *s = "";
- spinlock_lock(&names_globals.spinlock);
-
- if(id < names_globals.size) {
- perfLibRegistryEntry *titleEntry = names_globals.array[id];
- if(titleEntry && titleEntry->key)
- s = titleEntry->key;
- }
-
- spinlock_unlock(&names_globals.spinlock);
- return s;
-}
-
-const char *RegistryFindHelpByID(DWORD id) {
- const char *s = "";
- spinlock_lock(&names_globals.spinlock);
-
- if(id < names_globals.size) {
- perfLibRegistryEntry *titleEntry = names_globals.array[id];
- if(titleEntry && titleEntry->help)
- s = titleEntry->help;
- }
-
- spinlock_unlock(&names_globals.spinlock);
- return s;
-}
-
-// ----------------------------------------------------------
-
-static inline void readRegistryKeys_unsafe(BOOL helps) {
- TCHAR *pData = NULL;
-
- HKEY hKey;
- DWORD dwType;
- DWORD dwSize = 0;
- LONG lStatus;
-
- LPCSTR valueName;
- if(helps)
- valueName = TEXT("help");
- else
- valueName = TEXT("CounterDefinition");
-
- // Open the key for the English counters
- lStatus = RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT(REGISTRY_KEY), 0, KEY_READ, &hKey);
- if (lStatus != ERROR_SUCCESS) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "Failed to open registry key HKEY_LOCAL_MACHINE, subkey '%s', error %ld\n", REGISTRY_KEY, (long)lStatus);
- return;
- }
-
- // Get the size of the 'Counters' data
- lStatus = RegQueryValueEx(hKey, valueName, NULL, &dwType, NULL, &dwSize);
- if (lStatus != ERROR_SUCCESS) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "Failed to get registry key HKEY_LOCAL_MACHINE, subkey '%s', value '%s', size of data, error %ld\n",
- REGISTRY_KEY, (const char *)valueName, (long)lStatus);
- goto cleanup;
- }
-
- // Allocate memory for the data
- pData = mallocz(dwSize);
-
- // Read the 'Counters' data
- lStatus = RegQueryValueEx(hKey, valueName, NULL, &dwType, (LPBYTE)pData, &dwSize);
- if (lStatus != ERROR_SUCCESS || dwType != REG_MULTI_SZ) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "Failed to get registry key HKEY_LOCAL_MACHINE, subkey '%s', value '%s', data, error %ld\n",
- REGISTRY_KEY, (const char *)valueName, (long)lStatus);
- goto cleanup;
- }
-
- // Process the counter data
- TCHAR *ptr = pData;
- while (*ptr) {
- TCHAR *sid = ptr; // First string is the ID
- ptr += lstrlen(ptr) + 1; // Move to the next string
- TCHAR *name = ptr; // Second string is the name
- ptr += lstrlen(ptr) + 1; // Move to the next pair
-
- DWORD id = strtoul(sid, NULL, 10);
-
- if(helps)
- RegistrySetData_unsafe(id, NULL, name);
- else
- RegistrySetData_unsafe(id, name, NULL);
- }
-
-cleanup:
- if(pData) freez(pData);
- RegCloseKey(hKey);
-}
-
-static BOOL RegistryKeyModification(FILETIME *lastWriteTime) {
- HKEY hKey;
- LONG lResult;
- BOOL ret = FALSE;
-
- // Open the registry key
- lResult = RegOpenKeyEx(HKEY_LOCAL_MACHINE, TEXT(REGISTRY_KEY), 0, KEY_READ, &hKey);
- if (lResult != ERROR_SUCCESS) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "Failed to open registry key HKEY_LOCAL_MACHINE, subkey '%s', error %ld\n", REGISTRY_KEY, (long)lResult);
- return FALSE;
- }
-
- // Get the last write time
- lResult = RegQueryInfoKey(hKey, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, lastWriteTime);
- if (lResult != ERROR_SUCCESS) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "Failed to query registry key HKEY_LOCAL_MACHINE, subkey '%s', last write time, error %ld\n", REGISTRY_KEY, (long)lResult);
- ret = FALSE;
- }
- else
- ret = TRUE;
-
- RegCloseKey(hKey);
- return ret;
-}
-
-static inline void RegistryFetchAll_unsafe(void) {
- readRegistryKeys_unsafe(FALSE);
- readRegistryKeys_unsafe(TRUE);
-}
-
-void PerflibNamesRegistryInitialize(void) {
- spinlock_lock(&names_globals.spinlock);
- simple_hashtable_init_PERFLIB(&names_globals.hashtable, 20000);
- RegistryKeyModification(&names_globals.lastWriteTime);
- RegistryFetchAll_unsafe();
- spinlock_unlock(&names_globals.spinlock);
-}
-
-void PerflibNamesRegistryUpdate(void) {
- FILETIME lastWriteTime = { 0 };
- RegistryKeyModification(&lastWriteTime);
-
- if(CompareFileTime(&lastWriteTime, &names_globals.lastWriteTime) > 0) {
- spinlock_lock(&names_globals.spinlock);
- if(CompareFileTime(&lastWriteTime, &names_globals.lastWriteTime) > 0) {
- names_globals.lastWriteTime = lastWriteTime;
- RegistryFetchAll_unsafe();
- }
- spinlock_unlock(&names_globals.spinlock);
- }
-}
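
The deleted perflib-names.c above refreshes its name cache with a double-checked pattern: PerflibNamesRegistryUpdate() compares the registry key's last-write time outside the spinlock, then re-checks it after acquiring the lock, so concurrent callers trigger at most one RegistryFetchAll_unsafe(). Below is a minimal sketch of that pattern, assuming a pthread mutex and a plain integer version number as stand-ins for the spinlock and FILETIME; all names are illustrative only.

/*
 * Minimal sketch of the double-checked refresh in PerflibNamesRegistryUpdate().
 * A pthread mutex stands in for the spinlock, an integer for the FILETIME.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static long cached_version = 0;          /* stands in for names_globals.lastWriteTime */

static long read_current_version(void) { /* stands in for RegistryKeyModification() */
    return 2;                            /* pretend the registry key changed once */
}

static void reload_names(void) {         /* stands in for RegistryFetchAll_unsafe() */
    printf("reloading perflib names...\n");
}

void names_registry_update(void) {
    long v = read_current_version();
    if (v <= cached_version)
        return;                          /* cheap check without the lock */

    pthread_mutex_lock(&lock);
    if (v > cached_version) {            /* re-check: another thread may have won */
        cached_version = v;
        reload_names();
    }
    pthread_mutex_unlock(&lock);
}

int main(void) {
    names_registry_update();             /* reloads once */
    names_registry_update();             /* second call is a no-op */
    return 0;
}
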
diff --git a/src/collectors/windows.plugin/perflib-netframework.c b/src/collectors/windows.plugin/perflib-netframework.c
new file mode 100644
index 000000000..28d58cae8
--- /dev/null
+++ b/src/collectors/windows.plugin/perflib-netframework.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+enum netdata_netframework_metrics {
+ NETDATA_NETFRAMEWORK_EXCEPTIONS,
+ NETDATA_NETFRAMEWORK_INTEROP,
+ NETDATA_NETFRAMEWORK_JIT,
+ NETDATA_NETFRAMEWORK_LOADING,
+
+ NETDATA_NETFRAMEWORK_END
+};
+
+struct net_framework_instances {
+ RRDSET *st_clrexception_thrown;
+ RRDDIM *rd_clrexception_thrown;
+
+ RRDSET *st_clrexception_filters;
+ RRDDIM *rd_clrexception_filters;
+
+ RRDSET *st_clrexception_finallys;
+ RRDDIM *rd_clrexception_finallys;
+
+ RRDSET *st_clrexception_total_catch_depth;
+ RRDDIM *rd_clrexception_total_catch_depth;
+
+ RRDSET *st_clrinterop_com_callable_wrappers;
+ RRDDIM *rd_clrinterop_com_callable_wrappers;
+
+ RRDSET *st_clrinterop_marshalling;
+ RRDDIM *rd_clrinterop_marshalling;
+
+ RRDSET *st_clrinterop_interop_stubs_created;
+ RRDDIM *rd_clrinterop_interop_stubs_created;
+
+ RRDSET *st_clrjit_methods;
+ RRDDIM *rd_clrjit_methods;
+
+ RRDSET *st_clrjit_time;
+ RRDDIM *rd_clrjit_time;
+
+ RRDSET *st_clrjit_standard_failures;
+ RRDDIM *rd_clrjit_standard_failures;
+
+ RRDSET *st_clrjit_il_bytes;
+ RRDDIM *rd_clrjit_il_bytes;
+
+ RRDSET *st_clrloading_heap_size;
+ RRDDIM *rd_clrloading_heap_size;
+
+ RRDSET *st_clrloading_app_domains_loaded;
+ RRDDIM *rd_clrloading_app_domains_loaded;
+
+ RRDSET *st_clrloading_app_domains_unloaded;
+ RRDDIM *rd_clrloading_app_domains_unloaded;
+
+ RRDSET *st_clrloading_assemblies_loaded;
+ RRDDIM *rd_clrloading_assemblies_loaded;
+
+ RRDSET *st_clrloading_classes_loaded;
+ RRDDIM *rd_clrloading_classes_loaded;
+
+ RRDSET *st_clrloading_class_load_failure;
+ RRDDIM *rd_clrloading_class_load_failure;
+
+ COUNTER_DATA NETFrameworkCLRExceptionThrown;
+ COUNTER_DATA NETFrameworkCLRExceptionFilters;
+ COUNTER_DATA NETFrameworkCLRExceptionFinallys;
+ COUNTER_DATA NETFrameworkCLRExceptionTotalCatchDepth;
+
+ COUNTER_DATA NETFrameworkCLRInteropCOMCallableWrappers;
+ COUNTER_DATA NETFrameworkCLRInteropMarshalling;
+ COUNTER_DATA NETFrameworkCLRInteropStubsCreated;
+
+ COUNTER_DATA NETFrameworkCLRJITMethods;
+ COUNTER_DATA NETFrameworkCLRJITPercentTime;
+ COUNTER_DATA NETFrameworkCLRJITFrequencyTime;
+ COUNTER_DATA NETFrameworkCLRJITStandardFailures;
+ COUNTER_DATA NETFrameworkCLRJITIlBytes;
+
+ COUNTER_DATA NETFrameworkCLRLoadingHeapSize;
+ COUNTER_DATA NETFrameworkCLRLoadingAppDomainsLoaded;
+ COUNTER_DATA NETFrameworkCLRLoadingAppDomainsUnloaded;
+ COUNTER_DATA NETFrameworkCLRLoadingAssembliesLoaded;
+ COUNTER_DATA NETFrameworkCLRLoadingClassesLoaded;
+ COUNTER_DATA NETFrameworkCLRLoadingClassLoadFailure;
+};
+
+static inline void initialize_net_framework_processes_keys(struct net_framework_instances *p) {
+ p->NETFrameworkCLRExceptionFilters.key = "# of Filters / sec";
+ p->NETFrameworkCLRExceptionFinallys.key = "# of Finallys / sec";
+ p->NETFrameworkCLRExceptionThrown.key = "# of Exceps Thrown / sec";
+ p->NETFrameworkCLRExceptionTotalCatchDepth.key = "Throw To Catch Depth / sec";
+
+ p->NETFrameworkCLRInteropCOMCallableWrappers.key = "# of CCWs";
+    p->NETFrameworkCLRInteropMarshalling.key = "# of marshalling";
+    p->NETFrameworkCLRInteropStubsCreated.key = "# of Stubs";
+
+ p->NETFrameworkCLRJITMethods.key = "# of Methods Jitted";
+ p->NETFrameworkCLRJITPercentTime.key = "% Time in Jit";
+ p->NETFrameworkCLRJITFrequencyTime.key = "IL Bytes Jitted / sec";
+ p->NETFrameworkCLRJITStandardFailures.key = "Standard Jit Failures";
+ p->NETFrameworkCLRJITIlBytes.key = "# of IL Bytes Jitted";
+
+ p->NETFrameworkCLRLoadingHeapSize.key = "Bytes in Loader Heap";
+ p->NETFrameworkCLRLoadingAppDomainsLoaded.key = "Rate of appdomains";
+ p->NETFrameworkCLRLoadingAppDomainsUnloaded.key = "Total appdomains unloaded";
+ p->NETFrameworkCLRLoadingAssembliesLoaded.key = "Total Assemblies";
+ p->NETFrameworkCLRLoadingClassesLoaded.key = "Total Classes Loaded";
+ p->NETFrameworkCLRLoadingClassLoadFailure.key = "Total # of Load Failures";
+}
+
+void dict_net_framework_processes_insert_cb(
+ const DICTIONARY_ITEM *item __maybe_unused,
+ void *value,
+ void *data __maybe_unused) {
+ struct net_framework_instances *p = value;
+ initialize_net_framework_processes_keys(p);
+}
+
+static DICTIONARY *processes = NULL;
+
+static void initialize(void) {
+ processes = dictionary_create_advanced(
+ DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct net_framework_instances));
+
+ dictionary_register_insert_callback(processes, dict_net_framework_processes_insert_cb, NULL);
+}
+
+static void netdata_framework_clr_exceptions(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) {
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ char id[RRD_ID_LENGTH_MAX + 1];
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ if (strcasecmp(windows_shared_buffer, "_Global_") == 0)
+ continue;
+
+ struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionThrown)) {
+ if (!p->st_clrexception_thrown) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_thrown", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrexception_thrown = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "exceptions",
+ "netframework.clrexception_thrown",
+ "Thrown exceptions",
+ "exceptions/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROWN,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrexception_thrown =
+ rrddim_add(p->st_clrexception_thrown, "exceptions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrexception_thrown->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrexception_thrown,
+ p->rd_clrexception_thrown,
+ (collected_number)p->NETFrameworkCLRExceptionThrown.current.Data);
+ rrdset_done(p->st_clrexception_thrown);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionFilters)) {
+ if (!p->st_clrexception_filters) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_filters", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrexception_filters = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "exceptions",
+ "netframework.clrexception_filters",
+ "Thrown exceptions filters",
+ "filters/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_EXCEPTION_FILTERS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrexception_filters =
+ rrddim_add(p->st_clrexception_filters, "filters", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrexception_filters->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrexception_filters,
+ p->rd_clrexception_filters,
+ (collected_number)p->NETFrameworkCLRExceptionFilters.current.Data);
+ rrdset_done(p->st_clrexception_filters);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionFinallys)) {
+ if (!p->st_clrexception_finallys) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_finallys", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrexception_finallys = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "exceptions",
+ "netframework.clrexception_finallys",
+ "Executed finally blocks",
+ "finallys/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_EXCEPTION_FINALLYS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrexception_finallys =
+ rrddim_add(p->st_clrexception_finallys, "finallys", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrexception_finallys->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrexception_finallys,
+ p->rd_clrexception_finallys,
+ (collected_number)p->NETFrameworkCLRExceptionFinallys.current.Data);
+ rrdset_done(p->st_clrexception_finallys);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRExceptionTotalCatchDepth)) {
+ if (!p->st_clrexception_total_catch_depth) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrexception_throw_to_catch_depth", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrexception_total_catch_depth = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "exceptions",
+ "netframework.clrexception_throw_to_catch_depth",
+ "Traversed stack frames",
+ "stack_frames/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROW_TO_CATCH_DEPTH,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrexception_total_catch_depth = rrddim_add(
+ p->st_clrexception_total_catch_depth, "traversed", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrexception_total_catch_depth->rrdlabels,
+ "process",
+ windows_shared_buffer,
+ RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrexception_total_catch_depth,
+ p->rd_clrexception_total_catch_depth,
+ (collected_number)p->NETFrameworkCLRExceptionTotalCatchDepth.current.Data);
+ rrdset_done(p->st_clrexception_total_catch_depth);
+ }
+ }
+}
+
+static void netdata_framework_clr_interop(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) {
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ char id[RRD_ID_LENGTH_MAX + 1];
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ if (strcasecmp(windows_shared_buffer, "_Global_") == 0)
+ continue;
+
+ struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRInteropCOMCallableWrappers)) {
+ if (!p->st_clrinterop_com_callable_wrappers) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrinterop_com_callable_wrappers", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrinterop_com_callable_wrappers = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "interop",
+ "netframework.clrinterop_com_callable_wrappers",
+ "COM callable wrappers (CCW)",
+ "ccw/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_INTEROP_CCW,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrinterop_com_callable_wrappers = rrddim_add(
+ p->st_clrinterop_com_callable_wrappers,
+ "com_callable_wrappers",
+ NULL,
+ 1,
+ 1,
+ RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrinterop_com_callable_wrappers->rrdlabels,
+ "process",
+ windows_shared_buffer,
+ RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrinterop_com_callable_wrappers,
+ p->rd_clrinterop_com_callable_wrappers,
+ (collected_number)p->NETFrameworkCLRInteropCOMCallableWrappers.current.Data);
+ rrdset_done(p->st_clrinterop_com_callable_wrappers);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRInteropMarshalling)) {
+ if (!p->st_clrinterop_marshalling) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrinterop_interop_marshalling", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrinterop_marshalling = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "interop",
+ "netframework.clrinterop_interop_marshallings",
+ "Arguments and return values marshallings",
+ "marshalling/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_INTEROP_MARSHALLING,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrinterop_marshalling =
+ rrddim_add(p->st_clrinterop_marshalling, "marshallings", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrinterop_marshalling->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrinterop_marshalling,
+ p->rd_clrinterop_marshalling,
+ (collected_number)p->NETFrameworkCLRInteropMarshalling.current.Data);
+ rrdset_done(p->st_clrinterop_marshalling);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRInteropStubsCreated)) {
+ if (!p->st_clrinterop_interop_stubs_created) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrinterop_interop_stubs_created", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrinterop_interop_stubs_created = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "interop",
+ "netframework.clrinterop_interop_stubs_created",
+ "Created stubs",
+ "stubs/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_INTEROP_STUBS_CREATED,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrinterop_interop_stubs_created = rrddim_add(
+ p->st_clrinterop_interop_stubs_created, "created", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrinterop_interop_stubs_created->rrdlabels,
+ "process",
+ windows_shared_buffer,
+ RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrinterop_interop_stubs_created,
+ p->rd_clrinterop_interop_stubs_created,
+ (collected_number)p->NETFrameworkCLRInteropStubsCreated.current.Data);
+ rrdset_done(p->st_clrinterop_interop_stubs_created);
+ }
+ }
+}
+
+static void netdata_framework_clr_jit(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) {
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ char id[RRD_ID_LENGTH_MAX + 1];
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ if (strcasecmp(windows_shared_buffer, "_Global_") == 0)
+ continue;
+
+ struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITMethods)) {
+ if (!p->st_clrjit_methods) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_methods", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrjit_methods = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "jit",
+ "netframework.clrjit_methods",
+ "JIT-compiled methods",
+ "methods/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_JIT_METHODS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrjit_methods =
+ rrddim_add(p->st_clrjit_methods, "jit-compiled", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_clrjit_methods->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrjit_methods,
+ p->rd_clrjit_methods,
+ (collected_number)p->NETFrameworkCLRJITMethods.current.Data);
+ rrdset_done(p->st_clrjit_methods);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITFrequencyTime) &&
+ perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITPercentTime)) {
+ if (!p->st_clrjit_time) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_time", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrjit_time = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "jit",
+ "netframework.clrjit_time",
+ "Time spent in JIT compilation",
+ "percentage",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_JIT_TIME,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrjit_time = rrddim_add(p->st_clrjit_time, "time", NULL, 1, 100, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_clrjit_time->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ double percTime = (double)p->NETFrameworkCLRJITPercentTime.current.Data;
+ percTime /= (double)p->NETFrameworkCLRJITFrequencyTime.current.Data;
+ percTime *= 100;
+ rrddim_set_by_pointer(p->st_clrjit_time, p->rd_clrjit_time, (collected_number)percTime);
+ rrdset_done(p->st_clrjit_time);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITStandardFailures)) {
+ if (!p->st_clrjit_standard_failures) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_standard_failures", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrjit_standard_failures = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "jit",
+ "netframework.clrjit_standard_failures",
+ "JIT compiler failures",
+ "failures/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_JIT_STANDARD_FAILURES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrjit_standard_failures =
+ rrddim_add(p->st_clrjit_standard_failures, "failures", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrjit_standard_failures->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrjit_standard_failures,
+ p->rd_clrjit_standard_failures,
+ (collected_number)p->NETFrameworkCLRJITStandardFailures.current.Data);
+ rrdset_done(p->st_clrjit_standard_failures);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRJITIlBytes)) {
+ if (!p->st_clrjit_il_bytes) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrjit_il_bytes", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrjit_il_bytes = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "jit",
+ "netframework.clrjit_il_bytes",
+ "Compiled Microsoft intermediate language (MSIL) bytes",
+ "bytes/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_JIT_IL_BYTES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrjit_il_bytes =
+ rrddim_add(p->st_clrjit_il_bytes, "compiled_msil", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_clrjit_il_bytes->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrjit_il_bytes,
+ p->rd_clrjit_il_bytes,
+ (collected_number)p->NETFrameworkCLRJITIlBytes.current.Data);
+ rrdset_done(p->st_clrjit_il_bytes);
+ }
+ }
+}
+
+static void netdata_framework_clr_loading(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ if (strcasecmp(windows_shared_buffer, "_Global_") == 0)
+ continue;
+
+ struct net_framework_instances *p = dictionary_set(processes, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingHeapSize)) {
+ if (!p->st_clrloading_heap_size) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_loader_heap_size", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrloading_heap_size = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "loading",
+ "netframework.clrloading_loader_heap_size",
+ "Memory committed by class loader",
+ "bytes",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_LOADING_HEAP_SIZE,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrloading_heap_size =
+ rrddim_add(p->st_clrloading_heap_size, "committed", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(
+ p->st_clrloading_heap_size->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrloading_heap_size,
+ p->rd_clrloading_heap_size,
+ (collected_number)p->NETFrameworkCLRLoadingHeapSize.current.Data);
+ rrdset_done(p->st_clrloading_heap_size);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingAppDomainsLoaded)) {
+ if (!p->st_clrloading_app_domains_loaded) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_appdomains_loaded", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrloading_app_domains_loaded = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "loading",
+ "netframework.clrloading_appdomains_loaded",
+ "Loaded application domains",
+ "domain/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_LOADED,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrloading_app_domains_loaded =
+ rrddim_add(p->st_clrloading_app_domains_loaded, "loaded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrloading_app_domains_loaded->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrloading_app_domains_loaded,
+ p->rd_clrloading_app_domains_loaded,
+ (collected_number)p->NETFrameworkCLRLoadingAppDomainsLoaded.current.Data);
+ rrdset_done(p->st_clrloading_app_domains_loaded);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingAppDomainsUnloaded)) {
+ if (!p->st_clrloading_app_domains_unloaded) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_appdomains_unloaded", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrloading_app_domains_unloaded = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "loading",
+ "netframework.clrloading_appdomains_unloaded",
+ "Unloaded application domains",
+ "domain/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_UNLOADED,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrloading_app_domains_unloaded = rrddim_add(
+ p->st_clrloading_app_domains_unloaded, "unloaded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrloading_app_domains_unloaded->rrdlabels,
+ "process",
+ windows_shared_buffer,
+ RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrloading_app_domains_unloaded,
+ p->rd_clrloading_app_domains_unloaded,
+ (collected_number)p->NETFrameworkCLRLoadingAppDomainsUnloaded.current.Data);
+ rrdset_done(p->st_clrloading_app_domains_unloaded);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingAssembliesLoaded)) {
+ if (!p->st_clrloading_assemblies_loaded) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_assemblies_loaded", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrloading_assemblies_loaded = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "loading",
+ "netframework.clrloading_assemblies_loaded",
+ "Loaded assemblies",
+ "assemblies/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_LOADING_ASSEMBLIES_LOADED,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrloading_assemblies_loaded =
+ rrddim_add(p->st_clrloading_assemblies_loaded, "loaded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrloading_assemblies_loaded->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrloading_assemblies_loaded,
+ p->rd_clrloading_assemblies_loaded,
+ (collected_number)p->NETFrameworkCLRLoadingAssembliesLoaded.current.Data);
+ rrdset_done(p->st_clrloading_assemblies_loaded);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingClassesLoaded)) {
+ if (!p->st_clrloading_classes_loaded) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_classes_loaded", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrloading_classes_loaded = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "loading",
+ "netframework.clrloading_classes_loaded",
+ "Loaded classes in all assemblies",
+ "classes/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_LOADING_CLASSES_LOADED,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrloading_classes_loaded =
+ rrddim_add(p->st_clrloading_classes_loaded, "loaded", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrloading_classes_loaded->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrloading_classes_loaded,
+ p->rd_clrloading_classes_loaded,
+ (collected_number)p->NETFrameworkCLRLoadingClassesLoaded.current.Data);
+ rrdset_done(p->st_clrloading_classes_loaded);
+ }
+
+ if (perflibGetObjectCounter(pDataBlock, pObjectType, &p->NETFrameworkCLRLoadingClassLoadFailure)) {
+ if (!p->st_clrloading_class_load_failure) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "%s_clrloading_class_load_failure", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_clrloading_class_load_failure = rrdset_create_localhost(
+ "netframework",
+ id,
+ NULL,
+ "loading",
+ "netframework.clrloading_class_load_failures",
+ "Class load failures",
+ "failures/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetFramework",
+ PRIO_NETFRAMEWORK_CLR_LOADING_CLASS_LOAD_FAILURE,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_clrloading_class_load_failure = rrddim_add(
+ p->st_clrloading_class_load_failure, "class_load", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_clrloading_class_load_failure->rrdlabels, "process", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_clrloading_class_load_failure,
+ p->rd_clrloading_class_load_failure,
+ (collected_number)p->NETFrameworkCLRLoadingClassLoadFailure.current.Data);
+ rrdset_done(p->st_clrloading_class_load_failure);
+ }
+ }
+}
+
+struct netdata_netframework_objects {
+ char *object;
+ void (*fnct)(PERF_DATA_BLOCK *, PERF_OBJECT_TYPE *, int);
+} netframewrk_obj[NETDATA_NETFRAMEWORK_END] = {
+ {.fnct = netdata_framework_clr_exceptions, .object = ".NET CLR Exceptions"},
+ {.fnct = netdata_framework_clr_interop, .object = ".NET CLR Interop"},
+ {.fnct = netdata_framework_clr_jit, .object = ".NET CLR Jit"},
+ {.fnct = netdata_framework_clr_loading, .object = ".NET CLR Loading"}};
+
+int do_PerflibNetFramework(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if (unlikely(!initialized)) {
+ initialize();
+ initialized = true;
+ }
+
+ int i;
+ for (i = 0; i < NETDATA_NETFRAMEWORK_END; i++) {
+ DWORD id = RegistryFindIDByName(netframewrk_obj[i].object);
+ if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ continue;
+
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if (!pDataBlock)
+ continue;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, netframewrk_obj[i].object);
+ if (!pObjectType)
+ continue;
+
+ netframewrk_obj[i].fnct(pDataBlock, pObjectType, update_every);
+ }
+
+ return 0;
+}
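
do_PerflibNetFramework() above is table-driven: netframewrk_obj[] pairs each .NET CLR performance-object name with a handler, and objects whose names cannot be resolved through the registry are skipped for the cycle rather than treated as errors. Below is a minimal sketch of that flow; find_id_by_name() and the collect_* handlers are hypothetical stand-ins for RegistryFindIDByName() and the perflib calls, not the real API.

/*
 * Minimal sketch of the table-driven per-object collection loop in
 * do_PerflibNetFramework(). All lookup and handler functions are stand-ins.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define NAME_NOT_FOUND (-1)

struct perf_object {
    const char *name;
    void (*handler)(const char *object, int update_every);
};

static int find_id_by_name(const char *name) {
    /* pretend only the Exceptions object is registered on this host */
    return strcmp(name, ".NET CLR Exceptions") == 0 ? 42 : NAME_NOT_FOUND;
}

static void collect_exceptions(const char *object, int update_every) {
    printf("collecting '%s' every %d second(s)\n", object, update_every);
}

static void collect_loading(const char *object, int update_every) {
    printf("collecting '%s' every %d second(s)\n", object, update_every);
}

static const struct perf_object objects[] = {
    {".NET CLR Exceptions", collect_exceptions},
    {".NET CLR Loading", collect_loading},
};

int main(void) {
    int update_every = 1;
    for (size_t i = 0; i < sizeof(objects) / sizeof(objects[0]); i++) {
        if (find_id_by_name(objects[i].name) == NAME_NOT_FOUND)
            continue;                    /* object missing: skip it, don't fail */
        objects[i].handler(objects[i].name, update_every);
    }
    return 0;
}
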
diff --git a/src/collectors/windows.plugin/perflib-network.c b/src/collectors/windows.plugin/perflib-network.c
index ecadd1e87..55d873b6f 100644
--- a/src/collectors/windows.plugin/perflib-network.c
+++ b/src/collectors/windows.plugin/perflib-network.c
@@ -1,453 +1,1047 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "windows_plugin.h"
-#include "windows-internals.h"
-
-// --------------------------------------------------------------------------------------------------------------------
-// network protocols
-
-struct network_protocol {
- const char *protocol;
-
- struct {
- COUNTER_DATA received;
- COUNTER_DATA sent;
- COUNTER_DATA delivered;
- COUNTER_DATA forwarded;
- RRDSET *st;
- RRDDIM *rd_received;
- RRDDIM *rd_sent;
- RRDDIM *rd_forwarded;
- RRDDIM *rd_delivered;
- const char *type;
- const char *id;
- const char *family;
- const char *context;
- const char *title;
- long priority;
- } packets;
-
-} networks[] = {
- {
- .protocol = "IPv4",
- .packets = {
- .received = { .key = "Datagrams Received/sec" },
- .sent = { .key = "Datagrams Sent/sec" },
- .delivered = { .key = "Datagrams Received Delivered/sec" },
- .forwarded = { .key = "Datagrams Forwarded/sec" },
- .type = "ipv4",
- .id = "packets",
- .family = "packets",
- .context = "ipv4.packets",
- .title = "IPv4 Packets",
- .priority = NETDATA_CHART_PRIO_IPV4_PACKETS,
- },
- },
- {
- .protocol = "IPv6",
- .packets = {
- .received = { .key = "Datagrams Received/sec" },
- .sent = { .key = "Datagrams Sent/sec" },
- .delivered = { .key = "Datagrams Received Delivered/sec" },
- .forwarded = { .key = "Datagrams Forwarded/sec" },
- .type = "ipv6",
- .id = "packets",
- .family = "packets",
- .context = "ip6.packets",
- .title = "IPv6 Packets",
- .priority = NETDATA_CHART_PRIO_IPV6_PACKETS,
- },
- },
- {
- .protocol = "TCPv4",
- .packets = {
- .received = { .key = "Segments Received/sec" },
- .sent = { .key = "Segments Sent/sec" },
- .type = "ipv4",
- .id = "tcppackets",
- .family = "tcp",
- .context = "ipv4.tcppackets",
- .title = "IPv4 TCP Packets",
- .priority = NETDATA_CHART_PRIO_IPV4_TCP_PACKETS,
- },
- },
- {
- .protocol = "TCPv6",
- .packets = {
- .received = { .key = "Segments Received/sec" },
- .sent = { .key = "Segments Sent/sec" },
- .type = "ipv6",
- .id = "tcppackets",
- .family = "tcp6",
- .context = "ipv6.tcppackets",
- .title = "IPv6 TCP Packets",
- .priority = NETDATA_CHART_PRIO_IPV6_TCP_PACKETS,
- },
- },
- {
- .protocol = "UDPv4",
- .packets = {
- .received = { .key = "Datagrams Received/sec" },
- .sent = { .key = "Datagrams Sent/sec" },
- .type = "ipv4",
- .id = "udppackets",
- .family = "udp",
- .context = "ipv4.udppackets",
- .title = "IPv4 UDP Packets",
- .priority = NETDATA_CHART_PRIO_IPV4_UDP_PACKETS,
- },
- },
- {
- .protocol = "UDPv6",
- .packets = {
- .received = { .key = "Datagrams Received/sec" },
- .sent = { .key = "Datagrams Sent/sec" },
- .type = "ipv6",
- .id = "udppackets",
- .family = "udp6",
- .context = "ipv6.udppackets",
- .title = "IPv6 UDP Packets",
- .priority = NETDATA_CHART_PRIO_IPV6_UDP_PACKETS,
- },
- },
- {
- .protocol = "ICMP",
- .packets = {
- .received = { .key = "Messages Received/sec" },
- .sent = { .key = "Messages Sent/sec" },
- .type = "ipv4",
- .id = "icmp",
- .family = "icmp",
- .context = "ipv4.icmp",
- .title = "IPv4 ICMP Packets",
- .priority = NETDATA_CHART_PRIO_IPV4_ICMP_PACKETS,
- },
- },
- {
- .protocol = "ICMPv6",
- .packets = {
- .received = { .key = "Messages Received/sec" },
- .sent = { .key = "Messages Sent/sec" },
- .type = "ipv6",
- .id = "icmp",
- .family = "icmp6",
- .context = "ipv6.icmp",
- .title = "IPv6 ICMP Packets",
- .priority = NETDATA_CHART_PRIO_IPV6_ICMP_PACKETS,
- },
- },
-
- // terminator
- {
- .protocol = NULL,
- }
-};
-
-struct network_protocol tcp46 = {
- .packets = {
- .type = "ip",
- .id = "tcppackets",
- .family = "tcp",
- .context = "ip.tcppackets",
- .title = "TCP Packets",
- .priority = NETDATA_CHART_PRIO_IP_TCP_PACKETS,
- }
-};
-
-static void protocol_packets_chart_update(struct network_protocol *p, int update_every) {
- if(!p->packets.st) {
- p->packets.st = rrdset_create_localhost(
- p->packets.type
- , p->packets.id
- , NULL
- , p->packets.family
- , NULL
- , p->packets.title
- , "packets/s"
- , PLUGIN_WINDOWS_NAME
- , "PerflibNetwork"
- , p->packets.priority
- , update_every
- , RRDSET_TYPE_AREA
- );
-
- p->packets.rd_received = rrddim_add(p->packets.st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- p->packets.rd_sent = rrddim_add(p->packets.st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- if(p->packets.forwarded.key)
- p->packets.rd_forwarded = rrddim_add(p->packets.st, "forwarded", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
-
- if(p->packets.delivered.key)
- p->packets.rd_delivered = rrddim_add(p->packets.st, "delivered", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- if(p->packets.received.updated)
- rrddim_set_by_pointer(p->packets.st, p->packets.rd_received, (collected_number)p->packets.received.current.Data);
-
- if(p->packets.sent.updated)
- rrddim_set_by_pointer(p->packets.st, p->packets.rd_sent, (collected_number)p->packets.sent.current.Data);
-
- if(p->packets.forwarded.key && p->packets.forwarded.updated)
- rrddim_set_by_pointer(p->packets.st, p->packets.rd_forwarded, (collected_number)p->packets.forwarded.current.Data);
-
- if(p->packets.delivered.key && p->packets.delivered.updated)
- rrddim_set_by_pointer(p->packets.st, p->packets.rd_delivered, (collected_number)p->packets.delivered.current.Data);
-
- rrdset_done(p->packets.st);
-}
-
-static bool do_network_protocol(PERF_DATA_BLOCK *pDataBlock, int update_every, struct network_protocol *p) {
- if(!p || !p->protocol) return false;
-
- PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->protocol);
- if(!pObjectType) return false;
-
- size_t packets = 0;
- if(p->packets.received.key)
- packets += perflibGetObjectCounter(pDataBlock, pObjectType, &p->packets.received) ? 1 : 0;
-
- if(p->packets.sent.key)
- packets += perflibGetObjectCounter(pDataBlock, pObjectType, &p->packets.sent) ? 1 : 0;
-
- if(p->packets.delivered.key)
- packets += perflibGetObjectCounter(pDataBlock, pObjectType, &p->packets.delivered) ? 1 :0;
-
- if(p->packets.forwarded.key)
- packets += perflibGetObjectCounter(pDataBlock, pObjectType, &p->packets.forwarded) ? 1 : 0;
-
- if(packets)
- protocol_packets_chart_update(p, update_every);
-
- return true;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-// network interfaces
-
-struct network_interface {
- bool collected_metadata;
-
- struct {
- COUNTER_DATA received;
- COUNTER_DATA sent;
-
- RRDSET *st;
- RRDDIM *rd_received;
- RRDDIM *rd_sent;
- } packets;
-
- struct {
- COUNTER_DATA received;
- COUNTER_DATA sent;
-
- RRDSET *st;
- RRDDIM *rd_received;
- RRDDIM *rd_sent;
- } traffic;
-};
-
-static DICTIONARY *physical_interfaces = NULL, *virtual_interfaces = NULL;
-
-static void network_interface_init(struct network_interface *ni) {
- ni->packets.received.key = "Packets Received/sec";
- ni->packets.sent.key = "Packets Sent/sec";
-
- ni->traffic.received.key = "Bytes Received/sec";
- ni->traffic.sent.key = "Bytes Sent/sec";
-}
-
-void dict_interface_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
- struct network_interface *ni = value;
- network_interface_init(ni);
-}
-
-static void initialize(void) {
- physical_interfaces = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
- DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct network_interface));
-
- virtual_interfaces = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
- DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct network_interface));
-
- dictionary_register_insert_callback(physical_interfaces, dict_interface_insert_cb, NULL);
- dictionary_register_insert_callback(virtual_interfaces, dict_interface_insert_cb, NULL);
-}
-
-static void add_interface_labels(RRDSET *st, const char *name, bool physical) {
- rrdlabels_add(st->rrdlabels, "device", name, RRDLABEL_SRC_AUTO);
- rrdlabels_add(st->rrdlabels, "interface_type", physical ? "real" : "virtual", RRDLABEL_SRC_AUTO);
-}
-
-static bool is_physical_interface(const char *name) {
- void *d = dictionary_get(physical_interfaces, name);
- return d ? true : false;
-}
-
-static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, bool physical) {
- DICTIONARY *dict = physical_interfaces;
-
- PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, physical ? "Network Interface" : "Network Adapter");
- if(!pObjectType) return false;
-
- uint64_t total_received = 0, total_sent = 0;
-
- PERF_INSTANCE_DEFINITION *pi = NULL;
- for(LONG i = 0; i < pObjectType->NumInstances ; i++) {
- pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
- if(!pi) break;
-
- if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
- strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
-
- if(strcasecmp(windows_shared_buffer, "_Total") == 0)
- continue;
-
- if(!physical && is_physical_interface(windows_shared_buffer))
- // this virtual interface is already reported as physical interface
- continue;
-
- struct network_interface *d = dictionary_set(dict, windows_shared_buffer, NULL, sizeof(*d));
-
- if(!d->collected_metadata) {
- // TODO - get metadata about the network interface
- d->collected_metadata = true;
- }
-
- if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->traffic.received) &&
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->traffic.sent)) {
-
- if(d->traffic.received.current.Data == 0 && d->traffic.sent.current.Data == 0)
- // this interface has not received or sent any traffic
- continue;
-
- if (unlikely(!d->traffic.st)) {
- d->traffic.st = rrdset_create_localhost(
- "net",
- windows_shared_buffer,
- NULL,
- windows_shared_buffer,
- "net.net",
- "Bandwidth",
- "kilobits/s",
- PLUGIN_WINDOWS_NAME,
- "PerflibNetwork",
- NETDATA_CHART_PRIO_FIRST_NET_IFACE,
- update_every,
- RRDSET_TYPE_AREA);
-
- rrdset_flag_set(d->traffic.st, RRDSET_FLAG_DETAIL);
-
- add_interface_labels(d->traffic.st, windows_shared_buffer, physical);
-
- d->traffic.rd_received = rrddim_add(d->traffic.st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- d->traffic.rd_sent = rrddim_add(d->traffic.st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- total_received += d->traffic.received.current.Data;
- total_sent += d->traffic.sent.current.Data;
-
- rrddim_set_by_pointer(d->traffic.st, d->traffic.rd_received, (collected_number)d->traffic.received.current.Data);
- rrddim_set_by_pointer(d->traffic.st, d->traffic.rd_sent, (collected_number)d->traffic.sent.current.Data);
- rrdset_done(d->traffic.st);
- }
-
- if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->packets.received) &&
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->packets.sent)) {
-
- if (unlikely(!d->packets.st)) {
- d->packets.st = rrdset_create_localhost(
- "net_packets",
- windows_shared_buffer,
- NULL,
- windows_shared_buffer,
- "net.packets",
- "Packets",
- "packets/s",
- PLUGIN_WINDOWS_NAME,
- "PerflibNetwork",
- NETDATA_CHART_PRIO_FIRST_NET_IFACE + 1,
- update_every,
- RRDSET_TYPE_LINE);
-
- rrdset_flag_set(d->packets.st, RRDSET_FLAG_DETAIL);
-
- add_interface_labels(d->traffic.st, windows_shared_buffer, physical);
-
- d->packets.rd_received = rrddim_add(d->packets.st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- d->packets.rd_sent = rrddim_add(d->packets.st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(d->packets.st, d->packets.rd_received, (collected_number)d->packets.received.current.Data);
- rrddim_set_by_pointer(d->packets.st, d->packets.rd_sent, (collected_number)d->packets.sent.current.Data);
- rrdset_done(d->packets.st);
- }
- }
-
- if(physical) {
- static RRDSET *st = NULL;
- static RRDDIM *rd_received = NULL, *rd_sent = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "system",
- "net",
- NULL,
- "network",
- "system.net",
- "Physical Network Interfaces Aggregated Bandwidth",
- "kilobits/s",
- PLUGIN_WINDOWS_NAME,
- "PerflibNetwork",
- NETDATA_CHART_PRIO_SYSTEM_NET,
- update_every,
- RRDSET_TYPE_AREA);
-
- rd_received = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- rd_sent = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd_received, (collected_number)total_received);
- rrddim_set_by_pointer(st, rd_sent, (collected_number)total_sent);
- rrdset_done(st);
- }
-
- return true;
-}
-
-int do_PerflibNetwork(int update_every, usec_t dt __maybe_unused) {
- static bool initialized = false;
-
- if(unlikely(!initialized)) {
- initialize();
- initialized = true;
- }
-
- DWORD id = RegistryFindIDByName("Network Interface");
- if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
- return -1;
-
- PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
- if(!pDataBlock) return -1;
-
- do_network_interface(pDataBlock, update_every, true);
- do_network_interface(pDataBlock, update_every, false);
-
- struct network_protocol *tcp4 = NULL, *tcp6 = NULL;
- for(size_t i = 0; networks[i].protocol ;i++) {
- do_network_protocol(pDataBlock, update_every, &networks[i]);
-
- if(!tcp4 && strcmp(networks[i].protocol, "TCPv4") == 0)
- tcp4 = &networks[i];
- if(!tcp6 && strcmp(networks[i].protocol, "TCPv6") == 0)
- tcp6 = &networks[i];
- }
-
- if(tcp4 && tcp6) {
- tcp46.packets.received = tcp4->packets.received;
- tcp46.packets.sent = tcp4->packets.sent;
- tcp46.packets.received.current.Data += tcp6->packets.received.current.Data;
- tcp46.packets.sent.current.Data += tcp6->packets.sent.current.Data;
- protocol_packets_chart_update(&tcp46, update_every);
- }
-
- return 0;
-}
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+#define ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, counter) \
+ do { \
+ if ((p)->packets.counter.key) { \
+ packets += perflibGetObjectCounter((pDataBlock), (pObjectType), &(p)->packets.counter) ? 1 : 0; \
+ } \
+ } while (0)
+
+#define SET_DIM_IF_KEY_AND_UPDATED(p, field) \
+ do { \
+ if ((p)->packets.field.key && (p)->packets.field.updated) { \
+ rrddim_set_by_pointer( \
+ (p)->packets.st, (p)->packets.rd_##field, (collected_number)(p)->packets.field.current.Data); \
+ } \
+ } while (0)
+
+#define ADD_RRD_DIM_IF_KEY(packet_field, id, name, multiplier, algorithm) \
+ do { \
+ if (p->packets.packet_field.key) \
+ p->packets.rd_##packet_field = rrddim_add(st, id, name, multiplier, 1, algorithm); \
+ } while (0)
+
+// --------------------------------------------------------------------------------------------------------------------
+// network protocols
+
+struct network_protocol {
+ const char *protocol;
+
+ struct {
+ COUNTER_DATA received;
+ COUNTER_DATA sent;
+ COUNTER_DATA delivered;
+ COUNTER_DATA forwarded;
+
+ COUNTER_DATA InDiscards;
+ COUNTER_DATA OutDiscards;
+ COUNTER_DATA InHdrErrors;
+ COUNTER_DATA InAddrErrors;
+ COUNTER_DATA InUnknownProtos;
+ COUNTER_DATA InTooBigErrors;
+ COUNTER_DATA InTruncatedPkts;
+ COUNTER_DATA InNoRoutes;
+ COUNTER_DATA OutNoRoutes;
+
+ COUNTER_DATA InEchoReps;
+ COUNTER_DATA OutEchoReps;
+ COUNTER_DATA InDestUnreachs;
+ COUNTER_DATA OutDestUnreachs;
+ COUNTER_DATA InRedirects;
+ COUNTER_DATA OutRedirects;
+ COUNTER_DATA InEchos;
+ COUNTER_DATA OutEchos;
+ COUNTER_DATA InRouterAdvert;
+ COUNTER_DATA OutRouterAdvert;
+ COUNTER_DATA InRouterSelect;
+ COUNTER_DATA OutRouterSelect;
+ COUNTER_DATA InTimeExcds;
+ COUNTER_DATA OutTimeExcds;
+ COUNTER_DATA InParmProbs;
+ COUNTER_DATA OutParmProbs;
+ COUNTER_DATA InTimestamps;
+ COUNTER_DATA OutTimestamps;
+ COUNTER_DATA InTimestampReps;
+ COUNTER_DATA OutTimestampReps;
+
+ RRDSET *st;
+ RRDDIM *rd_received;
+ RRDDIM *rd_sent;
+ RRDDIM *rd_forwarded;
+ RRDDIM *rd_delivered;
+
+ RRDDIM *rd_InDiscards;
+ RRDDIM *rd_OutDiscards;
+ RRDDIM *rd_InHdrErrors;
+ RRDDIM *rd_InAddrErrors;
+ RRDDIM *rd_InUnknownProtos;
+ RRDDIM *rd_InTooBigErrors;
+ RRDDIM *rd_InTruncatedPkts;
+ RRDDIM *rd_InNoRoutes;
+ RRDDIM *rd_OutNoRoutes;
+
+ RRDDIM *rd_InEchoReps;
+ RRDDIM *rd_OutEchoReps;
+ RRDDIM *rd_InDestUnreachs;
+ RRDDIM *rd_OutDestUnreachs;
+ RRDDIM *rd_InRedirects;
+ RRDDIM *rd_OutRedirects;
+ RRDDIM *rd_InEchos;
+ RRDDIM *rd_OutEchos;
+ RRDDIM *rd_InRouterAdvert;
+ RRDDIM *rd_OutRouterAdvert;
+ RRDDIM *rd_InRouterSelect;
+ RRDDIM *rd_OutRouterSelect;
+ RRDDIM *rd_InTimeExcds;
+ RRDDIM *rd_OutTimeExcds;
+ RRDDIM *rd_InParmProbs;
+ RRDDIM *rd_OutParmProbs;
+ RRDDIM *rd_InTimestamps;
+ RRDDIM *rd_OutTimestamps;
+ RRDDIM *rd_InTimestampReps;
+ RRDDIM *rd_OutTimestampReps;
+
+ const char *type;
+ const char *id;
+ const char *family;
+ const char *context;
+ const char *title;
+ long priority;
+ } packets;
+
+} networks[] = {
+ {
+ .protocol = "IPv4",
+ .packets = {
+ .received = { .key = "Datagrams Received/sec" },
+ .sent = { .key = "Datagrams Sent/sec" },
+ .delivered = { .key = "Datagrams Received Delivered/sec" },
+ .forwarded = { .key = "Datagrams Forwarded/sec" },
+ .type = "ipv4",
+ .id = "packets",
+ .family = "packets",
+ .context = "ipv4.packets",
+ .title = "IPv4 Packets",
+ .priority = NETDATA_CHART_PRIO_IPV4_PACKETS,
+ },
+ },
+ {
+ .protocol = "IPv6",
+ .packets = {
+ .received = { .key = "Datagrams Received/sec" },
+ .sent = { .key = "Datagrams Sent/sec" },
+ .delivered = { .key = "Datagrams Received Delivered/sec" },
+ .forwarded = { .key = "Datagrams Forwarded/sec" },
+ .type = "ipv6",
+ .id = "packets",
+ .family = "packets",
+ .context = "ip6.packets",
+ .title = "IPv6 Packets",
+ .priority = NETDATA_CHART_PRIO_IPV6_PACKETS,
+ },
+ },
+ {
+ .protocol = "TCPv4",
+ .packets = {
+ .received = { .key = "Segments Received/sec" },
+ .sent = { .key = "Segments Sent/sec" },
+ .type = "ipv4",
+ .id = "tcppackets",
+ .family = "tcp",
+ .context = "ipv4.tcppackets",
+ .title = "IPv4 TCP Packets",
+ .priority = NETDATA_CHART_PRIO_IPV4_TCP_PACKETS,
+ },
+ },
+ {
+ .protocol = "TCPv6",
+ .packets = {
+ .received = { .key = "Segments Received/sec" },
+ .sent = { .key = "Segments Sent/sec" },
+ .type = "ipv6",
+ .id = "tcppackets",
+ .family = "tcp6",
+ .context = "ipv6.tcppackets",
+ .title = "IPv6 TCP Packets",
+ .priority = NETDATA_CHART_PRIO_IPV6_TCP_PACKETS,
+ },
+ },
+ {
+ .protocol = "UDPv4",
+ .packets = {
+ .received = { .key = "Datagrams Received/sec" },
+ .sent = { .key = "Datagrams Sent/sec" },
+ .type = "ipv4",
+ .id = "udppackets",
+ .family = "udp",
+ .context = "ipv4.udppackets",
+ .title = "IPv4 UDP Packets",
+ .priority = NETDATA_CHART_PRIO_IPV4_UDP_PACKETS,
+ },
+ },
+ {
+ .protocol = "UDPv6",
+ .packets = {
+ .received = { .key = "Datagrams Received/sec" },
+ .sent = { .key = "Datagrams Sent/sec" },
+ .type = "ipv6",
+ .id = "udppackets",
+ .family = "udp6",
+ .context = "ipv6.udppackets",
+ .title = "IPv6 UDP Packets",
+ .priority = NETDATA_CHART_PRIO_IPV6_UDP_PACKETS,
+ },
+ },
+ {
+ .protocol = "ICMP",
+ .packets = {
+ .received = { .key = "Messages Received/sec" },
+ .sent = { .key = "Messages Sent/sec" },
+ .type = "ipv4",
+ .id = "icmp",
+ .family = "icmp",
+ .context = "ipv4.icmp",
+ .title = "IPv4 ICMP Packets",
+ .priority = NETDATA_CHART_PRIO_IPV4_ICMP_PACKETS,
+ },
+ },
+ {
+ .protocol = "ICMPv6",
+ .packets = {
+ .received = { .key = "Messages Received/sec" },
+ .sent = { .key = "Messages Sent/sec" },
+ .type = "ipv6",
+ .id = "icmp",
+ .family = "icmp6",
+ .context = "ipv6.icmp",
+ .title = "IPv6 ICMP Packets",
+ .priority = NETDATA_CHART_PRIO_IPV6_ICMP_PACKETS,
+ },
+ },
+
+ {
+ .protocol = "IPv4",
+ .packets = {
+ .InDiscards = { .key = "Datagrams Received Discarded" },
+ .OutDiscards = { .key = "Datagrams Outbound Discarded" },
+ .OutNoRoutes = { .key = "Datagrams Outbound No Route" },
+ .InAddrErrors = { .key = "Datagrams Received Address Errors" },
+ .InHdrErrors = { .key = "Datagrams Received Header Errors" },
+ .InUnknownProtos = { .key = "Datagrams Received Unknown Protocol" },
+ .type = "ipv4",
+ .id = "errors",
+ .family = "errors",
+ .context = "ipv4.errors",
+ .title = "IPv4 errors",
+ .priority = NETDATA_CHART_PRIO_IPV4_ERRORS,
+ },
+ },
+ {
+ .protocol = "IPv6",
+ .packets = {
+ .InDiscards = { .key = "Datagrams Received Discarded" },
+ .OutDiscards = { .key = "Datagrams Outbound Discarded" },
+ .OutNoRoutes = { .key = "Datagrams Outbound No Route" },
+ .InAddrErrors = { .key = "Datagrams Received Address Errors" },
+ .InHdrErrors = { .key = "Datagrams Received Header Errors" },
+ .InUnknownProtos = { .key = "Datagrams Received Unknown Protocol" },
+ .type = "ipv6",
+ .id = "errors",
+ .family = "errors",
+ .context = "ipv6.errors",
+ .title = "IPv6 errors",
+ .priority = NETDATA_CHART_PRIO_IPV6_ERRORS,
+ },
+ },
+ {
+ .protocol = "ICMP",
+ .packets =
+ {
+ .InEchoReps = {.key = "Received Echo Reply/sec"},
+                .OutEchoReps = {.key = "Sent Echo Reply/sec"},
+ .InDestUnreachs = {.key = "Received Dest. Unreachable"},
+ .OutDestUnreachs = {.key = "Sent Destination Unreachable"},
+ .InRedirects = {.key = "Received Redirect/sec"},
+ .OutRedirects = {.key = "Sent Redirect/sec"},
+ .InEchos = {.key = "Received Echo/sec"},
+ .OutEchos = {.key = "Sent Echo/sec"},
+ .InRouterAdvert = {.key = NULL},
+ .OutRouterAdvert = {.key = NULL},
+ .InRouterSelect = {.key = NULL},
+ .OutRouterSelect = {.key = NULL},
+ .InTimeExcds = {.key = "Received Time Exceeded"},
+ .OutTimeExcds = {.key = "Sent Time Exceeded"},
+ .InParmProbs = {.key = "Received Parameter Problem"},
+ .OutParmProbs = {.key = "Sent Parameter Problem"},
+ .InTimestamps = {.key = "Received Timestamp/sec"},
+ .OutTimestamps = {.key = "Sent Timestamp/sec"},
+ .InTimestampReps = {.key = "Received Timestamp Reply/sec"},
+ .OutTimestampReps = {.key = "Sent Timestamp Reply/sec"},
+
+ .type = "ipv4",
+ .id = "icmpmsg",
+ .family = "icmp",
+ .context = "ipv4.icmpmsg",
+ .title = "IPv4 ICMP Packets",
+ .priority = NETDATA_CHART_PRIO_IPV4_ICMP_MESSAGES,
+ },
+ },
+ {
+ .protocol = "ICMPv6",
+ .packets =
+ {
+ .InEchoReps = {.key = "Received Echo Reply/sec"},
+                .OutEchoReps = {.key = "Sent Echo Reply/sec"},
+ .InDestUnreachs = {.key = "Received Dest. Unreachable"},
+ .OutDestUnreachs = {.key = "Sent Destination Unreachable"},
+ .InRedirects = {.key = "Received Redirect/sec"},
+ .OutRedirects = {.key = "Sent Redirect/sec"},
+ .InEchos = {.key = "Received Echo/sec"},
+ .OutEchos = {.key = "Sent Echo/sec"},
+ .InRouterAdvert = {.key = NULL},
+ .OutRouterAdvert = {.key = NULL},
+ .InRouterSelect = {.key = NULL},
+ .OutRouterSelect = {.key = NULL},
+ .InTimeExcds = {.key = "Received Time Exceeded"},
+ .OutTimeExcds = {.key = "Sent Time Exceeded"},
+ .InParmProbs = {.key = "Received Parameter Problem"},
+ .OutParmProbs = {.key = "Sent Parameter Problem"},
+ .InTimestamps = {.key = "Received Timestamp/sec"},
+ .OutTimestamps = {.key = "Sent Timestamp/sec"},
+ .InTimestampReps = {.key = "Received Timestamp Reply/sec"},
+ .OutTimestampReps = {.key = "Sent Timestamp Reply/sec"},
+
+ .type = "ipv6",
+ .id = "icmpmsg",
+ .family = "icmp",
+ .context = "ipv6.icmpmsg",
+ .title = "IPv6 ICMP Packets",
+ .priority = NETDATA_CHART_PRIO_IPV6_ICMP_MESSAGES,
+ },
+ },
+
+ // terminator
+ {
+ .protocol = NULL,
+ }
+};
+
+struct network_protocol tcp46 = {
+ .packets = {
+ .type = "ip",
+ .id = "tcppackets",
+ .family = "tcp",
+ .context = "ip.tcppackets",
+ .title = "TCP Packets",
+ .priority = NETDATA_CHART_PRIO_IP_TCP_PACKETS,
+ }
+};
+
+static void protocol_packets_chart_update(struct network_protocol *p, int update_every) {
+ if(!p->packets.st) {
+ p->packets.st = rrdset_create_localhost(
+ p->packets.type
+ , p->packets.id
+ , NULL
+ , p->packets.family
+ , NULL
+ , p->packets.title
+ , "packets/s"
+ , PLUGIN_WINDOWS_NAME
+ , "PerflibNetwork"
+ , p->packets.priority
+ , update_every
+ , RRDSET_TYPE_AREA
+ );
+
+ RRDSET *st = p->packets.st;
+
+ ADD_RRD_DIM_IF_KEY(received, "received", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(sent, "sent", NULL, -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(forwarded, "forwarded", NULL, -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(delivered, "delivered", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InDiscards, "InDiscards", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutDiscards, "OutDiscards", NULL, -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InHdrErrors, "InHdrErrors", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InAddrErrors, "InAddrErrors", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InUnknownProtos, "InUnknownProtos", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InTooBigErrors, "InTooBigErrors", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InTruncatedPkts, "InTruncatedPkts", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InNoRoutes, "InNoRoutes", NULL, 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutNoRoutes, "OutNoRoutes", NULL, -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InEchoReps, "InType0", "InEchoReps", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutEchoReps, "OutType0", "OutEchoReps", -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InDestUnreachs, "InType3", "InDestUnreachs", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutDestUnreachs, "OutType3", "OutDestUnreachs", -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InRedirects, "InType5", "InRedirects", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutRedirects, "OutType5", "OutRedirects", -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InEchos, "InType8", "InEchos", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutEchos, "OutType8", "OutEchos", -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InRouterAdvert, "InType9", "InRouterAdvert", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutRouterAdvert, "OutType9", "OutRouterAdvert", -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InRouterSelect, "InType10", "InRouterSelect", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutRouterSelect, "OutType10", "OutRouterSelect", -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InTimeExcds, "InType11", "InTimeExcds", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutTimeExcds, "OutType11", "OutTimeExcds", -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InParmProbs, "InType12", "InParmProbs", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutParmProbs, "OutType12", "OutParmProbs", -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InTimestamps, "InType13", "InTimestamps", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutTimestamps, "OutType13", "OutTimestamps", -1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(InTimestampReps, "InType14", "InTimestampReps", 1, RRD_ALGORITHM_INCREMENTAL);
+ ADD_RRD_DIM_IF_KEY(OutTimestampReps, "OutType14", "OutTimestampReps", -1, RRD_ALGORITHM_INCREMENTAL);
+
+ }
+
+ SET_DIM_IF_KEY_AND_UPDATED(p, received);
+ SET_DIM_IF_KEY_AND_UPDATED(p, sent);
+
+ SET_DIM_IF_KEY_AND_UPDATED(p, forwarded);
+ SET_DIM_IF_KEY_AND_UPDATED(p, delivered);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InDiscards);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutDiscards);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InHdrErrors);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InAddrErrors);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InUnknownProtos);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InTooBigErrors);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InTruncatedPkts);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InNoRoutes);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutNoRoutes);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InEchoReps);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutEchoReps);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InDestUnreachs);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutDestUnreachs);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InRedirects);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutRedirects);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InEchos);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutEchos);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InRouterAdvert);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutRouterAdvert);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InRouterSelect);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutRouterSelect);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InTimeExcds);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutTimeExcds);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InParmProbs);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutParmProbs);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InTimestamps);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutTimestamps);
+ SET_DIM_IF_KEY_AND_UPDATED(p, InTimestampReps);
+ SET_DIM_IF_KEY_AND_UPDATED(p, OutTimestampReps);
+
+ rrdset_done(p->packets.st);
+}
+
+static bool do_network_protocol(PERF_DATA_BLOCK *pDataBlock, int update_every, struct network_protocol *p) {
+ if(!p || !p->protocol) return false;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, p->protocol);
+ if(!pObjectType) return false;
+
+ size_t packets = 0;
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, received);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, sent);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, delivered);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, forwarded);
+
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InDiscards);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutDiscards);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InHdrErrors);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InAddrErrors);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InUnknownProtos);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTooBigErrors);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTruncatedPkts);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InNoRoutes);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutNoRoutes);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InEchoReps);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutEchoReps);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InDestUnreachs);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutDestUnreachs);
+
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InRedirects);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutRedirects);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InEchos);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutEchos);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InRouterAdvert);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutRouterAdvert);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InRouterSelect);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutRouterSelect);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTimeExcds);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutTimeExcds);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InParmProbs);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutParmProbs);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTimestamps);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutTimestamps);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, InTimestampReps);
+ ADD_PACKET_IF_KEY(p, packets, pDataBlock, pObjectType, OutTimestampReps);
+
+ if(packets)
+ protocol_packets_chart_update(p, update_every);
+
+ return true;
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// network interfaces
+
+struct network_interface {
+ usec_t last_collected;
+ bool collected_metadata;
+
+ struct {
+ COUNTER_DATA received;
+ COUNTER_DATA sent;
+
+ RRDSET *st;
+ RRDDIM *rd_received;
+ RRDDIM *rd_sent;
+ } packets;
+
+ struct {
+ const RRDVAR_ACQUIRED *chart_var_speed;
+
+ COUNTER_DATA received;
+ COUNTER_DATA sent;
+
+ RRDSET *st;
+ RRDDIM *rd_received;
+ RRDDIM *rd_sent;
+ } traffic;
+
+ struct {
+ COUNTER_DATA current_bandwidth;
+ RRDSET *st;
+ RRDDIM *rd;
+ } speed;
+
+ struct {
+ COUNTER_DATA received;
+ COUNTER_DATA outbound;
+
+ RRDSET *st;
+ RRDDIM *rd_received;
+ RRDDIM *rd_outbound;
+ } discards;
+
+ struct {
+ COUNTER_DATA received;
+ COUNTER_DATA outbound;
+
+ RRDSET *st;
+ RRDDIM *rd_received;
+ RRDDIM *rd_outbound;
+ } errors;
+
+ struct {
+ COUNTER_DATA length;
+ RRDSET *st;
+ RRDDIM *rd;
+ } queue;
+
+ struct {
+ COUNTER_DATA connections;
+ RRDSET *st;
+ RRDDIM *rd;
+ } chimney;
+
+ struct {
+ COUNTER_DATA connections;
+ COUNTER_DATA packets;
+ COUNTER_DATA exceptions;
+ COUNTER_DATA average_packet_size;
+
+ RRDSET *st_connections;
+ RRDDIM *rd_connections;
+
+ RRDSET *st_packets;
+ RRDDIM *rd_packets;
+
+ RRDSET *st_exceptions;
+ RRDDIM *rd_exceptions;
+
+ RRDSET *st_average_packet_size;
+ RRDDIM *rd_average_packet_size;
+ } rsc;
+};
+
+static DICTIONARY *physical_interfaces = NULL, *virtual_interfaces = NULL;
+
+static void network_interface_init(struct network_interface *d) {
+ d->packets.received.key = "Packets Received/sec";
+ d->packets.sent.key = "Packets Sent/sec";
+ d->traffic.received.key = "Bytes Received/sec";
+ d->traffic.sent.key = "Bytes Sent/sec";
+ d->speed.current_bandwidth.key = "Current Bandwidth";
+ d->discards.received.key = "Packets Received Discarded";
+ d->discards.outbound.key = "Packets Outbound Discarded";
+ d->errors.received.key = "Packets Received Errors";
+ d->errors.outbound.key = "Packets Outbound Errors";
+ d->queue.length.key = "Output Queue Length";
+ d->chimney.connections.key = "Offloaded Connections";
+ d->rsc.connections.key = "TCP Active RSC Connections";
+ d->rsc.packets.key = "TCP RSC Coalesced Packets/sec";
+ d->rsc.exceptions.key = "TCP RSC Exceptions/sec";
+ d->rsc.average_packet_size.key = "TCP RSC Average Packet Size";
+}
+
+static void network_interface_cleanup(struct network_interface *d) {
+ rrdvar_chart_variable_release(d->traffic.st, d->traffic.chart_var_speed);
+ rrdset_is_obsolete___safe_from_collector_thread(d->packets.st);
+ rrdset_is_obsolete___safe_from_collector_thread(d->traffic.st);
+ rrdset_is_obsolete___safe_from_collector_thread(d->speed.st);
+ rrdset_is_obsolete___safe_from_collector_thread(d->discards.st);
+ rrdset_is_obsolete___safe_from_collector_thread(d->errors.st);
+ rrdset_is_obsolete___safe_from_collector_thread(d->queue.st);
+ rrdset_is_obsolete___safe_from_collector_thread(d->chimney.st);
+ rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_connections);
+ rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_packets);
+ rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_exceptions);
+ rrdset_is_obsolete___safe_from_collector_thread(d->rsc.st_average_packet_size);
+}
+
+void dict_interface_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct network_interface *ni = value;
+ network_interface_init(ni);
+}
+
+static void initialize(void) {
+ physical_interfaces = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
+ DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct network_interface));
+
+ virtual_interfaces = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
+ DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct network_interface));
+
+ dictionary_register_insert_callback(physical_interfaces, dict_interface_insert_cb, NULL);
+ dictionary_register_insert_callback(virtual_interfaces, dict_interface_insert_cb, NULL);
+}
+
+static void add_interface_labels(RRDSET *st, const char *name, bool physical) {
+ rrdlabels_add(st->rrdlabels, "device", name, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(st->rrdlabels, "interface_type", physical ? "real" : "virtual", RRDLABEL_SRC_AUTO);
+}
+
+static bool is_physical_interface(const char *name) {
+ void *d = dictionary_get(physical_interfaces, name);
+ return d ? true : false;
+}
+
+static bool do_network_interface(PERF_DATA_BLOCK *pDataBlock, int update_every, bool physical, usec_t now_ut) {
+ DICTIONARY *dict = physical ? physical_interfaces : virtual_interfaces;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, physical ? "Network Interface" : "Network Adapter");
+ if(!pObjectType) return false;
+
+ uint64_t total_received = 0, total_sent = 0;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for(LONG i = 0; i < pObjectType->NumInstances ; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if(!pi) break;
+
+ if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ if(strcasecmp(windows_shared_buffer, "_Total") == 0)
+ continue;
+
+ if(!physical && is_physical_interface(windows_shared_buffer))
+            // this virtual interface is already reported as a physical interface
+ continue;
+
+ struct network_interface *d = dictionary_set(dict, windows_shared_buffer, NULL, sizeof(*d));
+ d->last_collected = now_ut;
+
+ if(!d->collected_metadata) {
+ // TODO - get metadata about the network interface
+ d->collected_metadata = true;
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->traffic.received) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->traffic.sent)) {
+
+ if(d->traffic.received.current.Data == 0 && d->traffic.sent.current.Data == 0)
+ // this interface has not received or sent any traffic yet
+ continue;
+
+ if (unlikely(!d->traffic.st)) {
+ d->traffic.st = rrdset_create_localhost(
+ "net",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.net",
+ "Bandwidth",
+ "kilobits/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE,
+ update_every,
+ RRDSET_TYPE_AREA);
+
+ add_interface_labels(d->traffic.st, windows_shared_buffer, physical);
+
+ d->traffic.rd_received = rrddim_add(d->traffic.st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ d->traffic.rd_sent = rrddim_add(d->traffic.st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+
+ d->traffic.chart_var_speed = rrdvar_chart_variable_add_and_acquire(d->traffic.st, "nic_speed_max");
+ rrdvar_chart_variable_set(d->traffic.st, d->traffic.chart_var_speed, NAN);
+ }
+
+ total_received += d->traffic.received.current.Data;
+ total_sent += d->traffic.sent.current.Data;
+
+ rrddim_set_by_pointer(d->traffic.st, d->traffic.rd_received, (collected_number)d->traffic.received.current.Data);
+ rrddim_set_by_pointer(d->traffic.st, d->traffic.rd_sent, (collected_number)d->traffic.sent.current.Data);
+ rrdset_done(d->traffic.st);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->packets.received) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->packets.sent)) {
+
+ if (unlikely(!d->packets.st)) {
+ d->packets.st = rrdset_create_localhost(
+ "net_packets",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.packets",
+ "Packets",
+ "packets/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE + 1,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ add_interface_labels(d->packets.st, windows_shared_buffer, physical);
+
+ d->packets.rd_received = rrddim_add(d->packets.st, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->packets.rd_sent = rrddim_add(d->packets.st, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(d->packets.st, d->packets.rd_received, (collected_number)d->packets.received.current.Data);
+ rrddim_set_by_pointer(d->packets.st, d->packets.rd_sent, (collected_number)d->packets.sent.current.Data);
+ rrdset_done(d->packets.st);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->speed.current_bandwidth)) {
+ if(unlikely(!d->speed.st)) {
+ d->speed.st = rrdset_create_localhost(
+ "net_speed"
+ , windows_shared_buffer
+ , NULL
+ , windows_shared_buffer
+ , "net.speed"
+ , "Interface Speed"
+ , "kilobits/s"
+ , PLUGIN_WINDOWS_NAME
+ , "PerflibNetwork"
+ , NETDATA_CHART_PRIO_FIRST_NET_IFACE + 10
+ , update_every
+ , RRDSET_TYPE_LINE
+ );
+
+ add_interface_labels(d->speed.st, windows_shared_buffer, physical);
+
+ d->speed.rd = rrddim_add(d->speed.st, "speed", NULL, 1, BITS_IN_A_KILOBIT, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(d->speed.st, d->speed.rd, (collected_number)d->speed.current_bandwidth.current.Data);
+ rrdset_done(d->speed.st);
+
+ rrdvar_chart_variable_set(d->traffic.st, d->traffic.chart_var_speed,
+ (NETDATA_DOUBLE)d->speed.current_bandwidth.current.Data / BITS_IN_A_KILOBIT);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->errors.received) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->errors.outbound)) {
+
+ if (unlikely(!d->errors.st)) {
+ d->errors.st = rrdset_create_localhost(
+ "net_errors",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.errors",
+ "Interface Errors",
+ "errors/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE + 3,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ add_interface_labels(d->errors.st, windows_shared_buffer, physical);
+
+ d->errors.rd_received = rrddim_add(d->errors.st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->errors.rd_outbound = rrddim_add(d->errors.st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(d->errors.st, d->errors.rd_received, (collected_number)d->errors.received.current.Data);
+ rrddim_set_by_pointer(d->errors.st, d->errors.rd_outbound, (collected_number)d->errors.outbound.current.Data);
+ rrdset_done(d->errors.st);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->discards.received) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->discards.outbound)) {
+
+ if (unlikely(!d->discards.st)) {
+ d->discards.st = rrdset_create_localhost(
+ "net_drops",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.drops",
+ "Interface Drops",
+ "drops/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE + 4,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ add_interface_labels(d->discards.st, windows_shared_buffer, physical);
+
+ d->discards.rd_received = rrddim_add(d->discards.st, "inbound", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ d->discards.rd_outbound = rrddim_add(d->discards.st, "outbound", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(d->discards.st, d->discards.rd_received, (collected_number)d->discards.received.current.Data);
+ rrddim_set_by_pointer(d->discards.st, d->discards.rd_outbound, (collected_number)d->discards.outbound.current.Data);
+ rrdset_done(d->discards.st);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->queue.length)) {
+ if (unlikely(!d->queue.st)) {
+ d->queue.st = rrdset_create_localhost(
+ "net_queue_length",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.queue_length",
+ "Interface Output Queue Length",
+ "packets",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE + 5,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ add_interface_labels(d->queue.st, windows_shared_buffer, physical);
+
+ d->queue.rd = rrddim_add(d->queue.st, "length", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(d->queue.st, d->queue.rd, (collected_number)d->queue.length.current.Data);
+ rrdset_done(d->queue.st);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.connections)) {
+ if (unlikely(!d->rsc.st_connections)) {
+ d->rsc.st_connections = rrdset_create_localhost(
+ "net_rsc_connections",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.rsc_connections",
+ "Active TCP Connections Offloaded by RSC",
+ "connections",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE + 6,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ add_interface_labels(d->rsc.st_connections, windows_shared_buffer, physical);
+
+ d->rsc.rd_connections = rrddim_add(d->rsc.st_connections, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(d->rsc.st_connections, d->rsc.rd_connections, (collected_number)d->rsc.connections.current.Data);
+ rrdset_done(d->rsc.st_connections);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.packets)) {
+ if (unlikely(!d->rsc.st_packets)) {
+ d->rsc.st_packets = rrdset_create_localhost(
+ "net_rsc_packets",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.rsc_packets",
+ "TCP RSC Coalesced Packets",
+ "packets/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE + 7,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ add_interface_labels(d->rsc.st_packets, windows_shared_buffer, physical);
+
+ d->rsc.rd_packets = rrddim_add(d->rsc.st_packets, "packets", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(d->rsc.st_packets, d->rsc.rd_packets, (collected_number)d->rsc.packets.current.Data);
+ rrdset_done(d->rsc.st_packets);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.exceptions)) {
+ if (unlikely(!d->rsc.st_exceptions)) {
+ d->rsc.st_exceptions = rrdset_create_localhost(
+ "net_rsc_exceptions",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.rsc_exceptions",
+ "TCP RSC Exceptions",
+ "exceptions/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE + 8,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ add_interface_labels(d->rsc.st_exceptions, windows_shared_buffer, physical);
+
+ d->rsc.rd_exceptions = rrddim_add(d->rsc.st_exceptions, "exceptions", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(d->rsc.st_exceptions, d->rsc.rd_exceptions, (collected_number)d->rsc.exceptions.current.Data);
+ rrdset_done(d->rsc.st_exceptions);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->rsc.average_packet_size)) {
+ if (unlikely(!d->rsc.st_average_packet_size)) {
+ d->rsc.st_average_packet_size = rrdset_create_localhost(
+ "net_rsc_average_packet_size",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.rsc_average_packet_size",
+ "TCP RSC Average Packet Size",
+ "bytes",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE + 9,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ add_interface_labels(d->rsc.st_average_packet_size, windows_shared_buffer, physical);
+
+ d->rsc.rd_average_packet_size = rrddim_add(d->rsc.st_average_packet_size, "average", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(d->rsc.st_average_packet_size, d->rsc.rd_average_packet_size, (collected_number)d->rsc.average_packet_size.current.Data);
+ rrdset_done(d->rsc.st_average_packet_size);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->chimney.connections)) {
+ if (unlikely(!d->chimney.st)) {
+ d->chimney.st = rrdset_create_localhost(
+ "net_chimney_connections",
+ windows_shared_buffer,
+ NULL,
+ windows_shared_buffer,
+ "net.chimney_connections",
+ "Active TCP Connections Offloaded with Chimney",
+ "connections",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_FIRST_NET_IFACE + 10,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ add_interface_labels(d->chimney.st, windows_shared_buffer, physical);
+
+ d->chimney.rd = rrddim_add(d->chimney.st, "connections", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ rrddim_set_by_pointer(d->chimney.st, d->chimney.rd, (collected_number)d->chimney.connections.current.Data);
+ rrdset_done(d->chimney.st);
+ }
+ }
+
+ if(physical) {
+ static RRDSET *st = NULL;
+ static RRDDIM *rd_received = NULL, *rd_sent = NULL;
+
+ if (unlikely(!st)) {
+ st = rrdset_create_localhost(
+ "system",
+ "net",
+ NULL,
+ "network",
+ "system.net",
+ "Physical Network Interfaces Aggregated Bandwidth",
+ "kilobits/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibNetwork",
+ NETDATA_CHART_PRIO_SYSTEM_NET,
+ update_every,
+ RRDSET_TYPE_AREA);
+
+ rd_received = rrddim_add(st, "received", NULL, 8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ rd_sent = rrddim_add(st, "sent", NULL, -8, BITS_IN_A_KILOBIT, RRD_ALGORITHM_INCREMENTAL);
+ }
+
+ rrddim_set_by_pointer(st, rd_received, (collected_number)total_received);
+ rrddim_set_by_pointer(st, rd_sent, (collected_number)total_sent);
+ rrdset_done(st);
+ }
+
+ // cleanup
+ {
+ struct network_interface *d;
+ dfe_start_write(dict, d) {
+ if(d->last_collected < now_ut) {
+ network_interface_cleanup(d);
+ dictionary_del(dict, d_dfe.name);
+ }
+ }
+ dfe_done(d);
+ dictionary_garbage_collect(dict);
+ }
+
+ return true;
+}
+
+int do_PerflibNetwork(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if(unlikely(!initialized)) {
+ initialize();
+ initialized = true;
+ }
+
+ DWORD id = RegistryFindIDByName("Network Interface");
+ if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ return -1;
+
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if(!pDataBlock) return -1;
+
+ usec_t now_ut = now_monotonic_usec();
+ do_network_interface(pDataBlock, update_every, true, now_ut);
+ do_network_interface(pDataBlock, update_every, false, now_ut);
+
+ struct network_protocol *tcp4 = NULL, *tcp6 = NULL;
+ for(size_t i = 0; networks[i].protocol ;i++) {
+ do_network_protocol(pDataBlock, update_every, &networks[i]);
+
+ if(!tcp4 && strcmp(networks[i].protocol, "TCPv4") == 0)
+ tcp4 = &networks[i];
+ if(!tcp6 && strcmp(networks[i].protocol, "TCPv6") == 0)
+ tcp6 = &networks[i];
+ }
+
+ if(tcp4 && tcp6) {
+ tcp46.packets.received = tcp4->packets.received;
+ tcp46.packets.sent = tcp4->packets.sent;
+ tcp46.packets.received.current.Data += tcp6->packets.received.current.Data;
+ tcp46.packets.sent.current.Data += tcp6->packets.sent.current.Data;
+ protocol_packets_chart_update(&tcp46, update_every);
+ }
+ return 0;
+}
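
The rewritten perflib-network.c above keeps one struct network_interface per adapter in a dictionary and stamps it with last_collected on every pass, so interfaces that disappear from the perflib snapshot have their charts obsoleted and their entries garbage-collected. Below is a minimal, self-contained sketch of that lifecycle; the fixed-size table and the helper names (iface_seen, iface_gc) are stand-ins for netdata's DICTIONARY API, not part of the plugin.

/*
 * Minimal sketch of the stale-interface cleanup used by do_network_interface():
 * every interface seen in the current perflib snapshot gets its last_collected
 * stamp refreshed, and anything older than the current pass is dropped.
 * A plain C array stands in for netdata's DICTIONARY; names are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t usec_t;

struct iface {
    char name[64];
    usec_t last_collected;
    bool in_use;
};

static struct iface iface_table[8];

/* mark an interface as seen in this collection pass */
static void iface_seen(const char *name, usec_t now_ut) {
    for (size_t i = 0; i < sizeof(iface_table) / sizeof(iface_table[0]); i++) {
        struct iface *d = &iface_table[i];
        if (d->in_use && strcmp(d->name, name) == 0) {
            d->last_collected = now_ut;
            return;
        }
    }
    for (size_t i = 0; i < sizeof(iface_table) / sizeof(iface_table[0]); i++) {
        struct iface *d = &iface_table[i];
        if (!d->in_use) {
            snprintf(d->name, sizeof(d->name), "%s", name);
            d->last_collected = now_ut;
            d->in_use = true;
            return;
        }
    }
}

/* drop every interface that was not refreshed during the current pass */
static void iface_gc(usec_t now_ut) {
    for (size_t i = 0; i < sizeof(iface_table) / sizeof(iface_table[0]); i++) {
        struct iface *d = &iface_table[i];
        if (d->in_use && d->last_collected < now_ut) {
            printf("obsoleting charts of %s\n", d->name);
            d->in_use = false;
        }
    }
}

int main(void) {
    iface_seen("Ethernet", 1);
    iface_seen("Wi-Fi", 1);
    iface_gc(1);            /* nothing removed, both were seen at t=1 */

    iface_seen("Ethernet", 2);
    iface_gc(2);            /* Wi-Fi disappeared from the snapshot -> removed */
    return 0;
}
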
diff --git a/src/collectors/windows.plugin/perflib-objects.c b/src/collectors/windows.plugin/perflib-objects.c
index 6628ff864..cb1bc8d22 100644
--- a/src/collectors/windows.plugin/perflib-objects.c
+++ b/src/collectors/windows.plugin/perflib-objects.c
@@ -1,47 +1,47 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "windows_plugin.h"
-#include "windows-internals.h"
-
-#define _COMMON_PLUGIN_NAME "windows.plugin"
-#define _COMMON_PLUGIN_MODULE_NAME "PerflibObjects"
-#include "../common-contexts/common-contexts.h"
-
-static void initialize(void) {
- ;
-}
-
-static bool do_objects(PERF_DATA_BLOCK *pDataBlock, int update_every) {
- PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Objects");
- if (!pObjectType)
- return false;
-
- static COUNTER_DATA semaphores = { .key = "Semaphores" };
-
- if(perflibGetObjectCounter(pDataBlock, pObjectType, &semaphores)) {
- ULONGLONG sem = semaphores.current.Data;
- common_semaphore_ipc(sem, WINDOWS_MAX_KERNEL_OBJECT, _COMMON_PLUGIN_MODULE_NAME, update_every);
- }
-
- return true;
-}
-
-int do_PerflibObjects(int update_every, usec_t dt __maybe_unused) {
- static bool initialized = false;
-
- if(unlikely(!initialized)) {
- initialize();
- initialized = true;
- }
-
- DWORD id = RegistryFindIDByName("Objects");
- if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
- return -1;
-
- PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
- if(!pDataBlock) return -1;
-
- do_objects(pDataBlock, update_every);
-
- return 0;
-}
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+#define _COMMON_PLUGIN_NAME "windows.plugin"
+#define _COMMON_PLUGIN_MODULE_NAME "PerflibObjects"
+#include "../common-contexts/common-contexts.h"
+
+static void initialize(void) {
+ ;
+}
+
+static bool do_objects(PERF_DATA_BLOCK *pDataBlock, int update_every) {
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Objects");
+ if (!pObjectType)
+ return false;
+
+ static COUNTER_DATA semaphores = { .key = "Semaphores" };
+
+ if(perflibGetObjectCounter(pDataBlock, pObjectType, &semaphores)) {
+ ULONGLONG sem = semaphores.current.Data;
+ common_semaphore_ipc(sem, WINDOWS_MAX_KERNEL_OBJECT, _COMMON_PLUGIN_MODULE_NAME, update_every);
+ }
+
+ return true;
+}
+
+int do_PerflibObjects(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if(unlikely(!initialized)) {
+ initialize();
+ initialized = true;
+ }
+
+ DWORD id = RegistryFindIDByName("Objects");
+ if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ return -1;
+
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if(!pDataBlock) return -1;
+
+ do_objects(pDataBlock, update_every);
+
+ return 0;
+}
diff --git a/src/collectors/windows.plugin/perflib-processes.c b/src/collectors/windows.plugin/perflib-processes.c
index 92aa243b9..70e388eed 100644
--- a/src/collectors/windows.plugin/perflib-processes.c
+++ b/src/collectors/windows.plugin/perflib-processes.c
@@ -1,58 +1,58 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "windows_plugin.h"
-#include "windows-internals.h"
-
-#define _COMMON_PLUGIN_NAME "windows.plugin"
-#define _COMMON_PLUGIN_MODULE_NAME "PerflibProcesses"
-#include "../common-contexts/common-contexts.h"
-
-static void initialize(void) {
- ;
-}
-
-static bool do_processes(PERF_DATA_BLOCK *pDataBlock, int update_every) {
- PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "System");
- if (!pObjectType)
- return false;
-
- static COUNTER_DATA processesRunning = { .key = "Processes" };
- static COUNTER_DATA contextSwitchPerSec = { .key = "Context Switches/sec" };
- static COUNTER_DATA threads = { .key = "Threads" };
-
- if(perflibGetObjectCounter(pDataBlock, pObjectType, &processesRunning)) {
- ULONGLONG running = processesRunning.current.Data;
- common_system_processes(running, update_every);
- }
-
- if(perflibGetObjectCounter(pDataBlock, pObjectType, &contextSwitchPerSec)) {
- ULONGLONG contexts = contextSwitchPerSec.current.Data;
- common_system_context_switch(contexts, update_every);
- }
-
- if(perflibGetObjectCounter(pDataBlock, pObjectType, &threads)) {
- ULONGLONG totalThreads = threads.current.Data;
- common_system_threads(totalThreads, update_every);
- }
- return true;
-}
-
-int do_PerflibProcesses(int update_every, usec_t dt __maybe_unused) {
- static bool initialized = false;
-
- if(unlikely(!initialized)) {
- initialize();
- initialized = true;
- }
-
- DWORD id = RegistryFindIDByName("System");
- if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
- return -1;
-
- PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
- if(!pDataBlock) return -1;
-
- do_processes(pDataBlock, update_every);
-
- return 0;
-}
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+#define _COMMON_PLUGIN_NAME "windows.plugin"
+#define _COMMON_PLUGIN_MODULE_NAME "PerflibProcesses"
+#include "../common-contexts/common-contexts.h"
+
+static void initialize(void) {
+ ;
+}
+
+static bool do_processes(PERF_DATA_BLOCK *pDataBlock, int update_every) {
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "System");
+ if (!pObjectType)
+ return false;
+
+ static COUNTER_DATA processesRunning = { .key = "Processes" };
+ static COUNTER_DATA contextSwitchPerSec = { .key = "Context Switches/sec" };
+ static COUNTER_DATA threads = { .key = "Threads" };
+
+ if(perflibGetObjectCounter(pDataBlock, pObjectType, &processesRunning)) {
+ ULONGLONG running = processesRunning.current.Data;
+ common_system_processes(running, update_every);
+ }
+
+ if(perflibGetObjectCounter(pDataBlock, pObjectType, &contextSwitchPerSec)) {
+ ULONGLONG contexts = contextSwitchPerSec.current.Data;
+ common_system_context_switch(contexts, update_every);
+ }
+
+ if(perflibGetObjectCounter(pDataBlock, pObjectType, &threads)) {
+ ULONGLONG totalThreads = threads.current.Data;
+ common_system_threads(totalThreads, update_every);
+ }
+ return true;
+}
+
+int do_PerflibProcesses(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if(unlikely(!initialized)) {
+ initialize();
+ initialized = true;
+ }
+
+ DWORD id = RegistryFindIDByName("System");
+ if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ return -1;
+
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if(!pDataBlock) return -1;
+
+ do_processes(pDataBlock, update_every);
+
+ return 0;
+}
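
perflib-processes.c maps the "System" object counters onto the shared system charts through the common-contexts helpers. Counters whose perflib names end in "/sec" (for example "Context Switches/sec") arrive as raw, monotonically increasing counts; the per-second rate comes from netdata's RRD_ALGORITHM_INCREMENTAL, which differences consecutive samples. The sketch below only illustrates that derivation; the helper name and sample values are made up.

/*
 * Sketch of what RRD_ALGORITHM_INCREMENTAL does with counters such as
 * "Context Switches/sec": perflib hands the collector a raw, monotonically
 * increasing count, and the per-second rate is the difference between two
 * samples divided by the elapsed time. This illustrates the algorithm's
 * documented behaviour, not netdata's internal implementation.
 */
#include <stdint.h>
#include <stdio.h>

/* rate = (current - previous) / seconds elapsed between the two samples */
static double incremental_rate(uint64_t previous, uint64_t current, double elapsed_s) {
    if (current < previous || elapsed_s <= 0.0)
        return 0.0;             /* counter wrapped or bad interval: skip the point */
    return (double)(current - previous) / elapsed_s;
}

int main(void) {
    /* hypothetical "Context Switches/sec" raw counts, sampled 1 second apart */
    uint64_t prev = 1843200, curr = 1851600;
    printf("context switches/s: %.0f\n", incremental_rate(prev, curr, 1.0));
    return 0;
}
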
diff --git a/src/collectors/windows.plugin/perflib-processor.c b/src/collectors/windows.plugin/perflib-processor.c
index 4c7d86c90..a3df0fced 100644
--- a/src/collectors/windows.plugin/perflib-processor.c
+++ b/src/collectors/windows.plugin/perflib-processor.c
@@ -1,205 +1,205 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "windows_plugin.h"
-#include "windows-internals.h"
-
-#define _COMMON_PLUGIN_NAME "windows.plugin"
-#define _COMMON_PLUGIN_MODULE_NAME "PerflibProcesses"
-#include "../common-contexts/common-contexts.h"
-
-struct processor {
- bool collected_metadata;
-
- RRDSET *st;
- RRDDIM *rd_user;
- RRDDIM *rd_system;
- RRDDIM *rd_irq;
- RRDDIM *rd_dpc;
- RRDDIM *rd_idle;
-
-// RRDSET *st2;
-// RRDDIM *rd2_busy;
-
- COUNTER_DATA percentProcessorTime;
- COUNTER_DATA percentUserTime;
- COUNTER_DATA percentPrivilegedTime;
- COUNTER_DATA percentDPCTime;
- COUNTER_DATA percentInterruptTime;
- COUNTER_DATA percentIdleTime;
-
- COUNTER_DATA interruptsPerSec;
-};
-
-struct processor total = { 0 };
-
-void initialize_processor_keys(struct processor *p) {
- p->percentProcessorTime.key = "% Processor Time";
- p->percentUserTime.key = "% User Time";
- p->percentPrivilegedTime.key = "% Privileged Time";
- p->percentDPCTime.key = "% DPC Time";
- p->percentInterruptTime.key = "% Interrupt Time";
- p->percentIdleTime.key = "% Idle Time";
- p->interruptsPerSec.key = "Interrupts/sec";
-}
-
-void dict_processor_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
- struct processor *p = value;
- initialize_processor_keys(p);
-}
-
-static DICTIONARY *processors = NULL;
-
-static void initialize(void) {
- initialize_processor_keys(&total);
-
- processors = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
- DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct processor));
-
- dictionary_register_insert_callback(processors, dict_processor_insert_cb, NULL);
-}
-
-static bool do_processors(PERF_DATA_BLOCK *pDataBlock, int update_every) {
- PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Processor");
- if(!pObjectType) return false;
-
- static const RRDVAR_ACQUIRED *cpus_var = NULL;
- int cores_found = 0;
- uint64_t totalIPC = 0;
-
- PERF_INSTANCE_DEFINITION *pi = NULL;
- for(LONG i = 0; i < pObjectType->NumInstances ; i++) {
- pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
- if(!pi) break;
-
- if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
- strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
-
- bool is_total = false;
- struct processor *p;
- int cpu = -1;
- if(strcasecmp(windows_shared_buffer, "_Total") == 0) {
- p = &total;
- is_total = true;
- cpu = -1;
- }
- else {
- p = dictionary_set(processors, windows_shared_buffer, NULL, sizeof(*p));
- is_total = false;
- cpu = str2i(windows_shared_buffer);
- snprintfz(windows_shared_buffer, sizeof(windows_shared_buffer), "cpu%d", cpu);
-
- if(cpu + 1 > cores_found)
- cores_found = cpu + 1;
- }
-
- if(!is_total && !p->collected_metadata) {
- // TODO collect processor metadata
- p->collected_metadata = true;
- }
-
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentProcessorTime);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentUserTime);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentPrivilegedTime);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentDPCTime);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentInterruptTime);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentIdleTime);
-
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->interruptsPerSec);
-
- if(!p->st) {
- p->st = rrdset_create_localhost(
- is_total ? "system" : "cpu"
- , is_total ? "cpu" : windows_shared_buffer, NULL
- , is_total ? "cpu" : "utilization"
- , is_total ? "system.cpu" : "cpu.cpu"
- , is_total ? "Total CPU Utilization" : "Core Utilization"
- , "percentage"
- , PLUGIN_WINDOWS_NAME
- , "PerflibProcessor"
- , is_total ? NETDATA_CHART_PRIO_SYSTEM_CPU : NETDATA_CHART_PRIO_CPU_PER_CORE
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- p->rd_irq = rrddim_add(p->st, "interrupts", "irq", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- p->rd_user = rrddim_add(p->st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- p->rd_system = rrddim_add(p->st, "privileged", "system", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- p->rd_dpc = rrddim_add(p->st, "dpc", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- p->rd_idle = rrddim_add(p->st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
- rrddim_hide(p->st, "idle");
-
- if(!is_total)
- rrdlabels_add(p->st->rrdlabels, "cpu", windows_shared_buffer, RRDLABEL_SRC_AUTO);
- else
- cpus_var = rrdvar_host_variable_add_and_acquire(localhost, "active_processors");
- }
-
- uint64_t user = p->percentUserTime.current.Data;
- uint64_t system = p->percentPrivilegedTime.current.Data;
- uint64_t dpc = p->percentDPCTime.current.Data;
- uint64_t irq = p->percentInterruptTime.current.Data;
- uint64_t idle = p->percentIdleTime.current.Data;
-
- totalIPC += p->interruptsPerSec.current.Data;
-
- rrddim_set_by_pointer(p->st, p->rd_user, (collected_number)user);
- rrddim_set_by_pointer(p->st, p->rd_system, (collected_number)system);
- rrddim_set_by_pointer(p->st, p->rd_irq, (collected_number)irq);
- rrddim_set_by_pointer(p->st, p->rd_dpc, (collected_number)dpc);
- rrddim_set_by_pointer(p->st, p->rd_idle, (collected_number)idle);
- rrdset_done(p->st);
-
-// if(!p->st2) {
-// p->st2 = rrdset_create_localhost(
-// is_total ? "system" : "cpu2"
-// , is_total ? "cpu3" : buffer
-// , NULL
-// , is_total ? "utilization" : buffer
-// , is_total ? "system.cpu3" : "cpu2.cpu"
-// , is_total ? "Total CPU Utilization" : "Core Utilization"
-// , "percentage"
-// , PLUGIN_WINDOWS_NAME
-// , "PerflibProcessor"
-// , is_total ? NETDATA_CHART_PRIO_SYSTEM_CPU : NETDATA_CHART_PRIO_CPU_PER_CORE
-// , update_every
-// , RRDSET_TYPE_STACKED
-// );
-//
-// p->rd2_busy = perflib_rrddim_add(p->st2, "busy", NULL, 1, 1, &p->percentProcessorTime);
-// rrddim_hide(p->st2, "idle");
-//
-// if(!is_total)
-// rrdlabels_add(p->st->rrdlabels, "cpu", buffer, RRDLABEL_SRC_AUTO);
-// }
-//
-// perflib_rrddim_set_by_pointer(p->st2, p->rd2_busy, &p->percentProcessorTime);
-// rrdset_done(p->st2);
- }
-
- if(cpus_var)
- rrdvar_host_variable_set(localhost, cpus_var, cores_found);
-
- common_interrupts(totalIPC, update_every, NULL);
-
- return true;
-}
-
-int do_PerflibProcessor(int update_every, usec_t dt __maybe_unused) {
- static bool initialized = false;
-
- if(unlikely(!initialized)) {
- initialize();
- initialized = true;
- }
-
- DWORD id = RegistryFindIDByName("Processor");
- if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
- return -1;
-
- PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
- if(!pDataBlock) return -1;
-
- do_processors(pDataBlock, update_every);
-
- return 0;
-}
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+#define _COMMON_PLUGIN_NAME "windows.plugin"
+#define _COMMON_PLUGIN_MODULE_NAME "PerflibProcesses"
+#include "../common-contexts/common-contexts.h"
+
+struct processor {
+ bool collected_metadata;
+
+ RRDSET *st;
+ RRDDIM *rd_user;
+ RRDDIM *rd_system;
+ RRDDIM *rd_irq;
+ RRDDIM *rd_dpc;
+ RRDDIM *rd_idle;
+
+// RRDSET *st2;
+// RRDDIM *rd2_busy;
+
+ COUNTER_DATA percentProcessorTime;
+ COUNTER_DATA percentUserTime;
+ COUNTER_DATA percentPrivilegedTime;
+ COUNTER_DATA percentDPCTime;
+ COUNTER_DATA percentInterruptTime;
+ COUNTER_DATA percentIdleTime;
+
+ COUNTER_DATA interruptsPerSec;
+};
+
+struct processor total = { 0 };
+
+void initialize_processor_keys(struct processor *p) {
+ p->percentProcessorTime.key = "% Processor Time";
+ p->percentUserTime.key = "% User Time";
+ p->percentPrivilegedTime.key = "% Privileged Time";
+ p->percentDPCTime.key = "% DPC Time";
+ p->percentInterruptTime.key = "% Interrupt Time";
+ p->percentIdleTime.key = "% Idle Time";
+ p->interruptsPerSec.key = "Interrupts/sec";
+}
+
+void dict_processor_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct processor *p = value;
+ initialize_processor_keys(p);
+}
+
+static DICTIONARY *processors = NULL;
+
+static void initialize(void) {
+ initialize_processor_keys(&total);
+
+ processors = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
+ DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct processor));
+
+ dictionary_register_insert_callback(processors, dict_processor_insert_cb, NULL);
+}
+
+static bool do_processors(PERF_DATA_BLOCK *pDataBlock, int update_every) {
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Processor");
+ if(!pObjectType) return false;
+
+ static const RRDVAR_ACQUIRED *cpus_var = NULL;
+ int cores_found = 0;
+ uint64_t totalIPC = 0;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for(LONG i = 0; i < pObjectType->NumInstances ; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if(!pi) break;
+
+ if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ bool is_total = false;
+ struct processor *p;
+ int cpu = -1;
+ if(strcasecmp(windows_shared_buffer, "_Total") == 0) {
+ p = &total;
+ is_total = true;
+ cpu = -1;
+ }
+ else {
+ p = dictionary_set(processors, windows_shared_buffer, NULL, sizeof(*p));
+ is_total = false;
+ cpu = str2i(windows_shared_buffer);
+ snprintfz(windows_shared_buffer, sizeof(windows_shared_buffer), "cpu%d", cpu);
+
+ if(cpu + 1 > cores_found)
+ cores_found = cpu + 1;
+ }
+
+ if(!is_total && !p->collected_metadata) {
+ // TODO collect processor metadata
+ p->collected_metadata = true;
+ }
+
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentProcessorTime);
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentUserTime);
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentPrivilegedTime);
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentDPCTime);
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentInterruptTime);
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->percentIdleTime);
+
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->interruptsPerSec);
+
+ if(!p->st) {
+ p->st = rrdset_create_localhost(
+ is_total ? "system" : "cpu"
+ , is_total ? "cpu" : windows_shared_buffer, NULL
+ , is_total ? "cpu" : "utilization"
+ , is_total ? "system.cpu" : "cpu.cpu"
+ , is_total ? "Total CPU Utilization" : "Core Utilization"
+ , "percentage"
+ , PLUGIN_WINDOWS_NAME
+ , "PerflibProcessor"
+ , is_total ? NETDATA_CHART_PRIO_SYSTEM_CPU : NETDATA_CHART_PRIO_CPU_PER_CORE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ p->rd_irq = rrddim_add(p->st, "interrupts", "irq", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ p->rd_user = rrddim_add(p->st, "user", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ p->rd_system = rrddim_add(p->st, "privileged", "system", 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ p->rd_dpc = rrddim_add(p->st, "dpc", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ p->rd_idle = rrddim_add(p->st, "idle", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+ rrddim_hide(p->st, "idle");
+
+ if(!is_total)
+ rrdlabels_add(p->st->rrdlabels, "cpu", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ else
+ cpus_var = rrdvar_host_variable_add_and_acquire(localhost, "active_processors");
+ }
+
+ uint64_t user = p->percentUserTime.current.Data;
+ uint64_t system = p->percentPrivilegedTime.current.Data;
+ uint64_t dpc = p->percentDPCTime.current.Data;
+ uint64_t irq = p->percentInterruptTime.current.Data;
+ uint64_t idle = p->percentIdleTime.current.Data;
+
+ totalIPC += p->interruptsPerSec.current.Data;
+
+ rrddim_set_by_pointer(p->st, p->rd_user, (collected_number)user);
+ rrddim_set_by_pointer(p->st, p->rd_system, (collected_number)system);
+ rrddim_set_by_pointer(p->st, p->rd_irq, (collected_number)irq);
+ rrddim_set_by_pointer(p->st, p->rd_dpc, (collected_number)dpc);
+ rrddim_set_by_pointer(p->st, p->rd_idle, (collected_number)idle);
+ rrdset_done(p->st);
+
+// if(!p->st2) {
+// p->st2 = rrdset_create_localhost(
+// is_total ? "system" : "cpu2"
+// , is_total ? "cpu3" : buffer
+// , NULL
+// , is_total ? "utilization" : buffer
+// , is_total ? "system.cpu3" : "cpu2.cpu"
+// , is_total ? "Total CPU Utilization" : "Core Utilization"
+// , "percentage"
+// , PLUGIN_WINDOWS_NAME
+// , "PerflibProcessor"
+// , is_total ? NETDATA_CHART_PRIO_SYSTEM_CPU : NETDATA_CHART_PRIO_CPU_PER_CORE
+// , update_every
+// , RRDSET_TYPE_STACKED
+// );
+//
+// p->rd2_busy = perflib_rrddim_add(p->st2, "busy", NULL, 1, 1, &p->percentProcessorTime);
+// rrddim_hide(p->st2, "idle");
+//
+// if(!is_total)
+// rrdlabels_add(p->st->rrdlabels, "cpu", buffer, RRDLABEL_SRC_AUTO);
+// }
+//
+// perflib_rrddim_set_by_pointer(p->st2, p->rd2_busy, &p->percentProcessorTime);
+// rrdset_done(p->st2);
+ }
+
+ if(cpus_var)
+ rrdvar_host_variable_set(localhost, cpus_var, cores_found);
+
+ common_interrupts(totalIPC, update_every, NULL);
+
+ return true;
+}
+
+int do_PerflibProcessor(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if(unlikely(!initialized)) {
+ initialize();
+ initialized = true;
+ }
+
+ DWORD id = RegistryFindIDByName("Processor");
+ if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ return -1;
+
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if(!pDataBlock) return -1;
+
+ do_processors(pDataBlock, update_every);
+
+ return 0;
+}
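
The processor chart above registers user, system (privileged), irq, dpc and idle with RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL, so the cumulative "% ... Time" counters are rendered as each dimension's delta over the sum of all dimensions' deltas in the same interval. The sketch below shows that arithmetic under the assumption of netdata's documented behaviour for this algorithm; the helper name and the numbers are illustrative only.

/*
 * Sketch of RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL as used by the processor chart:
 * each "% ... Time" perflib counter is a cumulative time value, and every
 * dimension is shown as its delta expressed as a percentage of the sum of all
 * dimensions' deltas in the same interval.
 */
#include <stdint.h>
#include <stdio.h>

#define DIMS 5

/* percentages[i] = 100 * (curr[i]-prev[i]) / sum_j(curr[j]-prev[j]) */
static void pcent_over_diff_total(const uint64_t prev[DIMS], const uint64_t curr[DIMS],
                                  double percentages[DIMS]) {
    uint64_t deltas[DIMS], total = 0;

    for (int i = 0; i < DIMS; i++) {
        deltas[i] = (curr[i] >= prev[i]) ? curr[i] - prev[i] : 0;
        total += deltas[i];
    }

    for (int i = 0; i < DIMS; i++)
        percentages[i] = total ? 100.0 * (double)deltas[i] / (double)total : 0.0;
}

int main(void) {
    /* hypothetical cumulative times: user, system, irq, dpc, idle */
    const uint64_t prev[DIMS] = { 1000, 500, 20, 10, 8000 };
    const uint64_t curr[DIMS] = { 1400, 700, 30, 15, 8800 };
    const char *names[DIMS]   = { "user", "system", "irq", "dpc", "idle" };

    double pct[DIMS];
    pcent_over_diff_total(prev, curr, pct);

    for (int i = 0; i < DIMS; i++)
        printf("%-7s %6.2f%%\n", names[i], pct[i]);

    return 0;
}
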
diff --git a/src/collectors/windows.plugin/perflib-rrd.c b/src/collectors/windows.plugin/perflib-rrd.c
index d425307ee..5af36ae35 100644
--- a/src/collectors/windows.plugin/perflib-rrd.c
+++ b/src/collectors/windows.plugin/perflib-rrd.c
@@ -1,411 +1,411 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "perflib-rrd.h"
-
-#define COLLECTED_NUMBER_PRECISION 10000
-
-RRDDIM *perflib_rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divider, COUNTER_DATA *cd) {
- RRD_ALGORITHM algorithm = RRD_ALGORITHM_ABSOLUTE;
-
- switch (cd->current.CounterType) {
- case PERF_COUNTER_COUNTER:
- case PERF_SAMPLE_COUNTER:
- case PERF_COUNTER_BULK_COUNT:
- // (N1 - N0) / ((D1 - D0) / F)
- // multiplier *= cd->current.Frequency / 10000000;
- // tested, the frequency is not that useful for netdata
- // we get right results without it.
- algorithm = RRD_ALGORITHM_INCREMENTAL;
- break;
-
- case PERF_COUNTER_QUEUELEN_TYPE:
- case PERF_COUNTER_100NS_QUEUELEN_TYPE:
- case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
- case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
- case PERF_AVERAGE_BULK: // normally not displayed
- // (N1 - N0) / (D1 - D0)
- algorithm = RRD_ALGORITHM_INCREMENTAL;
- break;
-
- case PERF_OBJ_TIME_TIMER:
- case PERF_COUNTER_TIMER:
- case PERF_100NSEC_TIMER:
- case PERF_PRECISION_SYSTEM_TIMER:
- case PERF_PRECISION_100NS_TIMER:
- case PERF_PRECISION_OBJECT_TIMER:
- case PERF_SAMPLE_FRACTION:
- // 100 * (N1 - N0) / (D1 - D0)
- multiplier *= 100;
- algorithm = RRD_ALGORITHM_INCREMENTAL;
- break;
-
- case PERF_COUNTER_TIMER_INV:
- case PERF_100NSEC_TIMER_INV:
- // 100 * (1 - ((N1 - N0) / (D1 - D0)))
- divider *= COLLECTED_NUMBER_PRECISION;
- algorithm = RRD_ALGORITHM_ABSOLUTE;
- break;
-
- case PERF_COUNTER_MULTI_TIMER:
- // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1
- divider *= COLLECTED_NUMBER_PRECISION;
- algorithm = RRD_ALGORITHM_ABSOLUTE;
- break;
-
- case PERF_100NSEC_MULTI_TIMER:
- // 100 * ((N1 - N0) / (D1 - D0)) / B1
- divider *= COLLECTED_NUMBER_PRECISION;
- algorithm = RRD_ALGORITHM_ABSOLUTE;
- break;
-
- case PERF_COUNTER_MULTI_TIMER_INV:
- case PERF_100NSEC_MULTI_TIMER_INV:
- // 100 * (B1 - ((N1 - N0) / (D1 - D0)))
- divider *= COLLECTED_NUMBER_PRECISION;
- algorithm = RRD_ALGORITHM_ABSOLUTE;
- break;
-
- case PERF_COUNTER_RAWCOUNT:
- case PERF_COUNTER_LARGE_RAWCOUNT:
- // N as decimal
- algorithm = RRD_ALGORITHM_ABSOLUTE;
- break;
-
- case PERF_COUNTER_RAWCOUNT_HEX:
- case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
- // N as hexadecimal
- algorithm = RRD_ALGORITHM_ABSOLUTE;
- break;
-
- case PERF_COUNTER_DELTA:
- case PERF_COUNTER_LARGE_DELTA:
- // N1 - N0
- algorithm = RRD_ALGORITHM_ABSOLUTE;
- break;
-
- case PERF_RAW_FRACTION:
- case PERF_LARGE_RAW_FRACTION:
- // 100 * N / B
- algorithm = RRD_ALGORITHM_ABSOLUTE;
- divider *= COLLECTED_NUMBER_PRECISION;
- break;
-
- case PERF_AVERAGE_TIMER:
- // ((N1 - N0) / TB) / (B1 - B0)
- // divider *= cd->current.Frequency / 10000000;
- algorithm = RRD_ALGORITHM_INCREMENTAL;
- break;
-
- case PERF_ELAPSED_TIME:
- // (D0 - N0) / F
- algorithm = RRD_ALGORITHM_ABSOLUTE;
- break;
-
- case PERF_COUNTER_TEXT:
- case PERF_SAMPLE_BASE:
- case PERF_AVERAGE_BASE:
- case PERF_COUNTER_MULTI_BASE:
- case PERF_RAW_BASE:
- case PERF_COUNTER_NODATA:
- case PERF_PRECISION_TIMESTAMP:
- default:
- break;
- }
-
- return rrddim_add(st, id, name, multiplier, divider, algorithm);
-}
-
-#define VALID_DELTA(cd) \
- ((cd)->previous.Time > 0 && (cd)->current.Data >= (cd)->previous.Data && (cd)->current.Time > (cd)->previous.Time)
-
-collected_number perflib_rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, COUNTER_DATA *cd) {
- ULONGLONG numerator = 0;
- LONGLONG denominator = 0;
- double doubleValue = 0.0;
- collected_number value;
-
- switch(cd->current.CounterType) {
- case PERF_COUNTER_COUNTER:
- case PERF_SAMPLE_COUNTER:
- case PERF_COUNTER_BULK_COUNT:
- // (N1 - N0) / ((D1 - D0) / F)
- value = (collected_number)cd->current.Data;
- break;
-
- case PERF_COUNTER_QUEUELEN_TYPE:
- case PERF_COUNTER_100NS_QUEUELEN_TYPE:
- case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
- case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
- case PERF_AVERAGE_BULK: // normally not displayed
- // (N1 - N0) / (D1 - D0)
- value = (collected_number)cd->current.Data;
- break;
-
- case PERF_OBJ_TIME_TIMER:
- case PERF_COUNTER_TIMER:
- case PERF_100NSEC_TIMER:
- case PERF_PRECISION_SYSTEM_TIMER:
- case PERF_PRECISION_100NS_TIMER:
- case PERF_PRECISION_OBJECT_TIMER:
- case PERF_SAMPLE_FRACTION:
- // 100 * (N1 - N0) / (D1 - D0)
- value = (collected_number)cd->current.Data;
- break;
-
- case PERF_COUNTER_TIMER_INV:
- case PERF_100NSEC_TIMER_INV:
- // 100 * (1 - ((N1 - N0) / (D1 - D0)))
- if(!VALID_DELTA(cd)) return 0;
- numerator = cd->current.Data - cd->previous.Data;
- denominator = cd->current.Time - cd->previous.Time;
- doubleValue = 100.0 * (1.0 - ((double)numerator / (double)denominator));
- // printf("Display value is (timer-inv): %f%%\n", doubleValue);
- value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
- break;
-
- case PERF_COUNTER_MULTI_TIMER:
- // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1
- if(!VALID_DELTA(cd)) return 0;
- numerator = cd->current.Data - cd->previous.Data;
- denominator = cd->current.Time - cd->previous.Time;
- denominator /= cd->current.Frequency;
- doubleValue = 100.0 * ((double)numerator / (double)denominator) / cd->current.MultiCounterData;
- // printf("Display value is (multi-timer): %f%%\n", doubleValue);
- value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
- break;
-
- case PERF_100NSEC_MULTI_TIMER:
- // 100 * ((N1 - N0) / (D1 - D0)) / B1
- if(!VALID_DELTA(cd)) return 0;
- numerator = cd->current.Data - cd->previous.Data;
- denominator = cd->current.Time - cd->previous.Time;
- doubleValue = 100.0 * ((double)numerator / (double)denominator) / (double)cd->current.MultiCounterData;
- // printf("Display value is (100ns multi-timer): %f%%\n", doubleValue);
- value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
- break;
-
- case PERF_COUNTER_MULTI_TIMER_INV:
- case PERF_100NSEC_MULTI_TIMER_INV:
- // 100 * (B1 - ((N1 - N0) / (D1 - D0)))
- if(!VALID_DELTA(cd)) return 0;
- numerator = cd->current.Data - cd->previous.Data;
- denominator = cd->current.Time - cd->previous.Time;
- doubleValue = 100.0 * ((double)cd->current.MultiCounterData - ((double)numerator / (double)denominator));
- // printf("Display value is (multi-timer-inv): %f%%\n", doubleValue);
- value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
- break;
-
- case PERF_COUNTER_RAWCOUNT:
- case PERF_COUNTER_LARGE_RAWCOUNT:
- // N as decimal
- value = (collected_number)cd->current.Data;
- break;
-
- case PERF_COUNTER_RAWCOUNT_HEX:
- case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
- // N as hexadecimal
- value = (collected_number)cd->current.Data;
- break;
-
- case PERF_COUNTER_DELTA:
- case PERF_COUNTER_LARGE_DELTA:
- if(!VALID_DELTA(cd)) return 0;
- value = (collected_number)(cd->current.Data - cd->previous.Data);
- break;
-
- case PERF_RAW_FRACTION:
- case PERF_LARGE_RAW_FRACTION:
- // 100 * N / B
- if(!cd->current.Time) return 0;
- doubleValue = 100.0 * (double)cd->current.Data / (double)cd->current.Time;
- // printf("Display value is (fraction): %f%%\n", doubleValue);
- value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
- break;
-
- default:
- return 0;
- }
-
- return rrddim_set_by_pointer(st, rd, value);
-}
-
-/*
-double perflibCalculateValue(RAW_DATA *current, RAW_DATA *previous) {
- ULONGLONG numerator = 0;
- LONGLONG denominator = 0;
- double doubleValue = 0.0;
- DWORD dwordValue = 0;
-
- if (NULL == previous) {
- // Return error if the counter type requires two samples to calculate the value.
- switch (current->CounterType) {
- default:
- if (PERF_DELTA_COUNTER != (current->CounterType & PERF_DELTA_COUNTER))
- break;
- __fallthrough;
- // fallthrough
-
- case PERF_AVERAGE_TIMER: // Special case.
- case PERF_AVERAGE_BULK: // Special case.
- // printf(" > The counter type requires two samples but only one sample was provided.\n");
- return NAN;
- }
- }
- else {
- if (current->CounterType != previous->CounterType) {
- // printf(" > The samples have inconsistent counter types.\n");
- return NAN;
- }
-
- // Check for integer overflow or bad data from provider (the data from
- // sample 2 must be greater than the data from sample 1).
- if (current->Data < previous->Data)
- {
- // Can happen for various reasons. Commonly occurs with the Process counterset when
- // multiple processes have the same name and one of them starts or stops.
- // Normally you'll just drop the older sample and continue.
- // printf("> current (%llu) is smaller than previous (%llu).\n", current->Data, previous->Data);
- return NAN;
- }
- }
-
- switch (current->CounterType) {
- case PERF_COUNTER_COUNTER:
- case PERF_SAMPLE_COUNTER:
- case PERF_COUNTER_BULK_COUNT:
- // (N1 - N0) / ((D1 - D0) / F)
- numerator = current->Data - previous->Data;
- denominator = current->Time - previous->Time;
- dwordValue = (DWORD)(numerator / ((double)denominator / current->Frequency));
- //printf("Display value is (counter): %lu%s\n", (unsigned long)dwordValue,
- // (previous->CounterType == PERF_SAMPLE_COUNTER) ? "" : "/sec");
- return (double)dwordValue;
-
- case PERF_COUNTER_QUEUELEN_TYPE:
- case PERF_COUNTER_100NS_QUEUELEN_TYPE:
- case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
- case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
- case PERF_AVERAGE_BULK: // normally not displayed
- // (N1 - N0) / (D1 - D0)
- numerator = current->Data - previous->Data;
- denominator = current->Time - previous->Time;
- doubleValue = (double)numerator / denominator;
- if (previous->CounterType != PERF_AVERAGE_BULK) {
- // printf("Display value is (queuelen): %f\n", doubleValue);
- return doubleValue;
- }
- return NAN;
-
- case PERF_OBJ_TIME_TIMER:
- case PERF_COUNTER_TIMER:
- case PERF_100NSEC_TIMER:
- case PERF_PRECISION_SYSTEM_TIMER:
- case PERF_PRECISION_100NS_TIMER:
- case PERF_PRECISION_OBJECT_TIMER:
- case PERF_SAMPLE_FRACTION:
- // 100 * (N1 - N0) / (D1 - D0)
- numerator = current->Data - previous->Data;
- denominator = current->Time - previous->Time;
- doubleValue = (double)(100 * numerator) / denominator;
- // printf("Display value is (timer): %f%%\n", doubleValue);
- return doubleValue;
-
- case PERF_COUNTER_TIMER_INV:
- // 100 * (1 - ((N1 - N0) / (D1 - D0)))
- numerator = current->Data - previous->Data;
- denominator = current->Time - previous->Time;
- doubleValue = 100 * (1 - ((double)numerator / denominator));
- // printf("Display value is (timer-inv): %f%%\n", doubleValue);
- return doubleValue;
-
- case PERF_100NSEC_TIMER_INV:
- // 100 * (1- (N1 - N0) / (D1 - D0))
- numerator = current->Data - previous->Data;
- denominator = current->Time - previous->Time;
- doubleValue = 100 * (1 - (double)numerator / denominator);
- // printf("Display value is (100ns-timer-inv): %f%%\n", doubleValue);
- return doubleValue;
-
- case PERF_COUNTER_MULTI_TIMER:
- // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1
- numerator = current->Data - previous->Data;
- denominator = current->Time - previous->Time;
- denominator /= current->Frequency;
- doubleValue = 100 * ((double)numerator / denominator) / current->MultiCounterData;
- // printf("Display value is (multi-timer): %f%%\n", doubleValue);
- return doubleValue;
-
- case PERF_100NSEC_MULTI_TIMER:
- // 100 * ((N1 - N0) / (D1 - D0)) / B1
- numerator = current->Data - previous->Data;
- denominator = current->Time - previous->Time;
- doubleValue = 100 * ((double)numerator / (double)denominator) / (double)current->MultiCounterData;
- // printf("Display value is (100ns multi-timer): %f%%\n", doubleValue);
- return doubleValue;
-
- case PERF_COUNTER_MULTI_TIMER_INV:
- case PERF_100NSEC_MULTI_TIMER_INV:
- // 100 * (B1 - ((N1 - N0) / (D1 - D0)))
- numerator = current->Data - previous->Data;
- denominator = current->Time - previous->Time;
- doubleValue = 100.0 * ((double)current->MultiCounterData - ((double)numerator / (double)denominator));
- // printf("Display value is (multi-timer-inv): %f%%\n", doubleValue);
- return doubleValue;
-
- case PERF_COUNTER_RAWCOUNT:
- case PERF_COUNTER_LARGE_RAWCOUNT:
- // N as decimal
- // printf("Display value is (rawcount): %llu\n", current->Data);
- return (double)current->Data;
-
- case PERF_COUNTER_RAWCOUNT_HEX:
- case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
- // N as hexadecimal
- // printf("Display value is (hex): 0x%llx\n", current->Data);
- return (double)current->Data;
-
- case PERF_COUNTER_DELTA:
- case PERF_COUNTER_LARGE_DELTA:
- // N1 - N0
- // printf("Display value is (delta): %llu\n", current->Data - previous->Data);
- return (double)(current->Data - previous->Data);
-
- case PERF_RAW_FRACTION:
- case PERF_LARGE_RAW_FRACTION:
- // 100 * N / B
- doubleValue = 100.0 * (double)current->Data / (double)current->Time;
- // printf("Display value is (fraction): %f%%\n", doubleValue);
- return doubleValue;
-
- case PERF_AVERAGE_TIMER:
- // ((N1 - N0) / TB) / (B1 - B0)
- numerator = current->Data - previous->Data;
- denominator = current->Time - previous->Time;
- doubleValue = (double)numerator / (double)current->Frequency / (double)denominator;
- // printf("Display value is (average timer): %f seconds\n", doubleValue);
- return doubleValue;
-
- case PERF_ELAPSED_TIME:
- // (D0 - N0) / F
- doubleValue = (double)(current->Time - current->Data) / (double)current->Frequency;
- // printf("Display value is (elapsed time): %f seconds\n", doubleValue);
- return doubleValue;
-
- case PERF_COUNTER_TEXT:
- case PERF_SAMPLE_BASE:
- case PERF_AVERAGE_BASE:
- case PERF_COUNTER_MULTI_BASE:
- case PERF_RAW_BASE:
- case PERF_COUNTER_NODATA:
- case PERF_PRECISION_TIMESTAMP:
- // printf(" > Non-printing counter type: 0x%08x\n", current->CounterType);
- return NAN;
- break;
-
- default:
- // printf(" > Unrecognized counter type: 0x%08x\n", current->CounterType);
- return NAN;
- break;
- }
-}
-*/
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "perflib-rrd.h"
+
+#define COLLECTED_NUMBER_PRECISION 10000
+
+RRDDIM *perflib_rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divider, COUNTER_DATA *cd) {
+ RRD_ALGORITHM algorithm = RRD_ALGORITHM_ABSOLUTE;
+
+ switch (cd->current.CounterType) {
+ case PERF_COUNTER_COUNTER:
+ case PERF_SAMPLE_COUNTER:
+ case PERF_COUNTER_BULK_COUNT:
+ // (N1 - N0) / ((D1 - D0) / F)
+ // multiplier *= cd->current.Frequency / 10000000;
+ // tested, the frequency is not that useful for netdata
+            // we get correct results without it.
+ algorithm = RRD_ALGORITHM_INCREMENTAL;
+ break;
+
+ case PERF_COUNTER_QUEUELEN_TYPE:
+ case PERF_COUNTER_100NS_QUEUELEN_TYPE:
+ case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
+ case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
+ case PERF_AVERAGE_BULK: // normally not displayed
+ // (N1 - N0) / (D1 - D0)
+ algorithm = RRD_ALGORITHM_INCREMENTAL;
+ break;
+
+ case PERF_OBJ_TIME_TIMER:
+ case PERF_COUNTER_TIMER:
+ case PERF_100NSEC_TIMER:
+ case PERF_PRECISION_SYSTEM_TIMER:
+ case PERF_PRECISION_100NS_TIMER:
+ case PERF_PRECISION_OBJECT_TIMER:
+ case PERF_SAMPLE_FRACTION:
+ // 100 * (N1 - N0) / (D1 - D0)
+ multiplier *= 100;
+ algorithm = RRD_ALGORITHM_INCREMENTAL;
+ break;
+
+ case PERF_COUNTER_TIMER_INV:
+ case PERF_100NSEC_TIMER_INV:
+ // 100 * (1 - ((N1 - N0) / (D1 - D0)))
+ divider *= COLLECTED_NUMBER_PRECISION;
+ algorithm = RRD_ALGORITHM_ABSOLUTE;
+ break;
+
+ case PERF_COUNTER_MULTI_TIMER:
+ // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1
+ divider *= COLLECTED_NUMBER_PRECISION;
+ algorithm = RRD_ALGORITHM_ABSOLUTE;
+ break;
+
+ case PERF_100NSEC_MULTI_TIMER:
+ // 100 * ((N1 - N0) / (D1 - D0)) / B1
+ divider *= COLLECTED_NUMBER_PRECISION;
+ algorithm = RRD_ALGORITHM_ABSOLUTE;
+ break;
+
+ case PERF_COUNTER_MULTI_TIMER_INV:
+ case PERF_100NSEC_MULTI_TIMER_INV:
+ // 100 * (B1 - ((N1 - N0) / (D1 - D0)))
+ divider *= COLLECTED_NUMBER_PRECISION;
+ algorithm = RRD_ALGORITHM_ABSOLUTE;
+ break;
+
+ case PERF_COUNTER_RAWCOUNT:
+ case PERF_COUNTER_LARGE_RAWCOUNT:
+ // N as decimal
+ algorithm = RRD_ALGORITHM_ABSOLUTE;
+ break;
+
+ case PERF_COUNTER_RAWCOUNT_HEX:
+ case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
+ // N as hexadecimal
+ algorithm = RRD_ALGORITHM_ABSOLUTE;
+ break;
+
+ case PERF_COUNTER_DELTA:
+ case PERF_COUNTER_LARGE_DELTA:
+ // N1 - N0
+ algorithm = RRD_ALGORITHM_ABSOLUTE;
+ break;
+
+ case PERF_RAW_FRACTION:
+ case PERF_LARGE_RAW_FRACTION:
+ // 100 * N / B
+ algorithm = RRD_ALGORITHM_ABSOLUTE;
+ divider *= COLLECTED_NUMBER_PRECISION;
+ break;
+
+ case PERF_AVERAGE_TIMER:
+ // ((N1 - N0) / TB) / (B1 - B0)
+ // divider *= cd->current.Frequency / 10000000;
+ algorithm = RRD_ALGORITHM_INCREMENTAL;
+ break;
+
+ case PERF_ELAPSED_TIME:
+ // (D0 - N0) / F
+ algorithm = RRD_ALGORITHM_ABSOLUTE;
+ break;
+
+ case PERF_COUNTER_TEXT:
+ case PERF_SAMPLE_BASE:
+ case PERF_AVERAGE_BASE:
+ case PERF_COUNTER_MULTI_BASE:
+ case PERF_RAW_BASE:
+ case PERF_COUNTER_NODATA:
+ case PERF_PRECISION_TIMESTAMP:
+ default:
+ break;
+ }
+
+ return rrddim_add(st, id, name, multiplier, divider, algorithm);
+}
+
+#define VALID_DELTA(cd) \
+ ((cd)->previous.Time > 0 && (cd)->current.Data >= (cd)->previous.Data && (cd)->current.Time > (cd)->previous.Time)
+
+collected_number perflib_rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, COUNTER_DATA *cd) {
+ ULONGLONG numerator = 0;
+ LONGLONG denominator = 0;
+ double doubleValue = 0.0;
+ collected_number value;
+
+ switch(cd->current.CounterType) {
+ case PERF_COUNTER_COUNTER:
+ case PERF_SAMPLE_COUNTER:
+ case PERF_COUNTER_BULK_COUNT:
+ // (N1 - N0) / ((D1 - D0) / F)
+ value = (collected_number)cd->current.Data;
+ break;
+
+ case PERF_COUNTER_QUEUELEN_TYPE:
+ case PERF_COUNTER_100NS_QUEUELEN_TYPE:
+ case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
+ case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
+ case PERF_AVERAGE_BULK: // normally not displayed
+ // (N1 - N0) / (D1 - D0)
+ value = (collected_number)cd->current.Data;
+ break;
+
+ case PERF_OBJ_TIME_TIMER:
+ case PERF_COUNTER_TIMER:
+ case PERF_100NSEC_TIMER:
+ case PERF_PRECISION_SYSTEM_TIMER:
+ case PERF_PRECISION_100NS_TIMER:
+ case PERF_PRECISION_OBJECT_TIMER:
+ case PERF_SAMPLE_FRACTION:
+ // 100 * (N1 - N0) / (D1 - D0)
+ value = (collected_number)cd->current.Data;
+ break;
+
+ case PERF_COUNTER_TIMER_INV:
+ case PERF_100NSEC_TIMER_INV:
+ // 100 * (1 - ((N1 - N0) / (D1 - D0)))
+ if(!VALID_DELTA(cd)) return 0;
+ numerator = cd->current.Data - cd->previous.Data;
+ denominator = cd->current.Time - cd->previous.Time;
+ doubleValue = 100.0 * (1.0 - ((double)numerator / (double)denominator));
+ // printf("Display value is (timer-inv): %f%%\n", doubleValue);
+ value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
+ break;
+
+ case PERF_COUNTER_MULTI_TIMER:
+ // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1
+ if(!VALID_DELTA(cd)) return 0;
+ numerator = cd->current.Data - cd->previous.Data;
+ denominator = cd->current.Time - cd->previous.Time;
+ denominator /= cd->current.Frequency;
+ doubleValue = 100.0 * ((double)numerator / (double)denominator) / cd->current.MultiCounterData;
+ // printf("Display value is (multi-timer): %f%%\n", doubleValue);
+ value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
+ break;
+
+ case PERF_100NSEC_MULTI_TIMER:
+ // 100 * ((N1 - N0) / (D1 - D0)) / B1
+ if(!VALID_DELTA(cd)) return 0;
+ numerator = cd->current.Data - cd->previous.Data;
+ denominator = cd->current.Time - cd->previous.Time;
+ doubleValue = 100.0 * ((double)numerator / (double)denominator) / (double)cd->current.MultiCounterData;
+ // printf("Display value is (100ns multi-timer): %f%%\n", doubleValue);
+ value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
+ break;
+
+ case PERF_COUNTER_MULTI_TIMER_INV:
+ case PERF_100NSEC_MULTI_TIMER_INV:
+ // 100 * (B1 - ((N1 - N0) / (D1 - D0)))
+ if(!VALID_DELTA(cd)) return 0;
+ numerator = cd->current.Data - cd->previous.Data;
+ denominator = cd->current.Time - cd->previous.Time;
+ doubleValue = 100.0 * ((double)cd->current.MultiCounterData - ((double)numerator / (double)denominator));
+ // printf("Display value is (multi-timer-inv): %f%%\n", doubleValue);
+ value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
+ break;
+
+ case PERF_COUNTER_RAWCOUNT:
+ case PERF_COUNTER_LARGE_RAWCOUNT:
+ // N as decimal
+ value = (collected_number)cd->current.Data;
+ break;
+
+ case PERF_COUNTER_RAWCOUNT_HEX:
+ case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
+ // N as hexadecimal
+ value = (collected_number)cd->current.Data;
+ break;
+
+ case PERF_COUNTER_DELTA:
+ case PERF_COUNTER_LARGE_DELTA:
+ if(!VALID_DELTA(cd)) return 0;
+ value = (collected_number)(cd->current.Data - cd->previous.Data);
+ break;
+
+ case PERF_RAW_FRACTION:
+ case PERF_LARGE_RAW_FRACTION:
+ // 100 * N / B
+ if(!cd->current.Time) return 0;
+ doubleValue = 100.0 * (double)cd->current.Data / (double)cd->current.Time;
+ // printf("Display value is (fraction): %f%%\n", doubleValue);
+ value = (collected_number)(doubleValue * COLLECTED_NUMBER_PRECISION);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return rrddim_set_by_pointer(st, rd, value);
+}
+
+/*
+double perflibCalculateValue(RAW_DATA *current, RAW_DATA *previous) {
+ ULONGLONG numerator = 0;
+ LONGLONG denominator = 0;
+ double doubleValue = 0.0;
+ DWORD dwordValue = 0;
+
+ if (NULL == previous) {
+ // Return error if the counter type requires two samples to calculate the value.
+ switch (current->CounterType) {
+ default:
+ if (PERF_DELTA_COUNTER != (current->CounterType & PERF_DELTA_COUNTER))
+ break;
+ __fallthrough;
+ // fallthrough
+
+ case PERF_AVERAGE_TIMER: // Special case.
+ case PERF_AVERAGE_BULK: // Special case.
+ // printf(" > The counter type requires two samples but only one sample was provided.\n");
+ return NAN;
+ }
+ }
+ else {
+ if (current->CounterType != previous->CounterType) {
+ // printf(" > The samples have inconsistent counter types.\n");
+ return NAN;
+ }
+
+ // Check for integer overflow or bad data from provider (the data from
+ // sample 2 must be greater than the data from sample 1).
+ if (current->Data < previous->Data)
+ {
+ // Can happen for various reasons. Commonly occurs with the Process counterset when
+ // multiple processes have the same name and one of them starts or stops.
+ // Normally you'll just drop the older sample and continue.
+ // printf("> current (%llu) is smaller than previous (%llu).\n", current->Data, previous->Data);
+ return NAN;
+ }
+ }
+
+ switch (current->CounterType) {
+ case PERF_COUNTER_COUNTER:
+ case PERF_SAMPLE_COUNTER:
+ case PERF_COUNTER_BULK_COUNT:
+ // (N1 - N0) / ((D1 - D0) / F)
+ numerator = current->Data - previous->Data;
+ denominator = current->Time - previous->Time;
+ dwordValue = (DWORD)(numerator / ((double)denominator / current->Frequency));
+ //printf("Display value is (counter): %lu%s\n", (unsigned long)dwordValue,
+ // (previous->CounterType == PERF_SAMPLE_COUNTER) ? "" : "/sec");
+ return (double)dwordValue;
+
+ case PERF_COUNTER_QUEUELEN_TYPE:
+ case PERF_COUNTER_100NS_QUEUELEN_TYPE:
+ case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
+ case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
+ case PERF_AVERAGE_BULK: // normally not displayed
+ // (N1 - N0) / (D1 - D0)
+ numerator = current->Data - previous->Data;
+ denominator = current->Time - previous->Time;
+ doubleValue = (double)numerator / denominator;
+ if (previous->CounterType != PERF_AVERAGE_BULK) {
+ // printf("Display value is (queuelen): %f\n", doubleValue);
+ return doubleValue;
+ }
+ return NAN;
+
+ case PERF_OBJ_TIME_TIMER:
+ case PERF_COUNTER_TIMER:
+ case PERF_100NSEC_TIMER:
+ case PERF_PRECISION_SYSTEM_TIMER:
+ case PERF_PRECISION_100NS_TIMER:
+ case PERF_PRECISION_OBJECT_TIMER:
+ case PERF_SAMPLE_FRACTION:
+ // 100 * (N1 - N0) / (D1 - D0)
+ numerator = current->Data - previous->Data;
+ denominator = current->Time - previous->Time;
+ doubleValue = (double)(100 * numerator) / denominator;
+ // printf("Display value is (timer): %f%%\n", doubleValue);
+ return doubleValue;
+
+ case PERF_COUNTER_TIMER_INV:
+ // 100 * (1 - ((N1 - N0) / (D1 - D0)))
+ numerator = current->Data - previous->Data;
+ denominator = current->Time - previous->Time;
+ doubleValue = 100 * (1 - ((double)numerator / denominator));
+ // printf("Display value is (timer-inv): %f%%\n", doubleValue);
+ return doubleValue;
+
+ case PERF_100NSEC_TIMER_INV:
+ // 100 * (1- (N1 - N0) / (D1 - D0))
+ numerator = current->Data - previous->Data;
+ denominator = current->Time - previous->Time;
+ doubleValue = 100 * (1 - (double)numerator / denominator);
+ // printf("Display value is (100ns-timer-inv): %f%%\n", doubleValue);
+ return doubleValue;
+
+ case PERF_COUNTER_MULTI_TIMER:
+ // 100 * ((N1 - N0) / ((D1 - D0) / TB)) / B1
+ numerator = current->Data - previous->Data;
+ denominator = current->Time - previous->Time;
+ denominator /= current->Frequency;
+ doubleValue = 100 * ((double)numerator / denominator) / current->MultiCounterData;
+ // printf("Display value is (multi-timer): %f%%\n", doubleValue);
+ return doubleValue;
+
+ case PERF_100NSEC_MULTI_TIMER:
+ // 100 * ((N1 - N0) / (D1 - D0)) / B1
+ numerator = current->Data - previous->Data;
+ denominator = current->Time - previous->Time;
+ doubleValue = 100 * ((double)numerator / (double)denominator) / (double)current->MultiCounterData;
+ // printf("Display value is (100ns multi-timer): %f%%\n", doubleValue);
+ return doubleValue;
+
+ case PERF_COUNTER_MULTI_TIMER_INV:
+ case PERF_100NSEC_MULTI_TIMER_INV:
+ // 100 * (B1 - ((N1 - N0) / (D1 - D0)))
+ numerator = current->Data - previous->Data;
+ denominator = current->Time - previous->Time;
+ doubleValue = 100.0 * ((double)current->MultiCounterData - ((double)numerator / (double)denominator));
+ // printf("Display value is (multi-timer-inv): %f%%\n", doubleValue);
+ return doubleValue;
+
+ case PERF_COUNTER_RAWCOUNT:
+ case PERF_COUNTER_LARGE_RAWCOUNT:
+ // N as decimal
+ // printf("Display value is (rawcount): %llu\n", current->Data);
+ return (double)current->Data;
+
+ case PERF_COUNTER_RAWCOUNT_HEX:
+ case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
+ // N as hexadecimal
+ // printf("Display value is (hex): 0x%llx\n", current->Data);
+ return (double)current->Data;
+
+ case PERF_COUNTER_DELTA:
+ case PERF_COUNTER_LARGE_DELTA:
+ // N1 - N0
+ // printf("Display value is (delta): %llu\n", current->Data - previous->Data);
+ return (double)(current->Data - previous->Data);
+
+ case PERF_RAW_FRACTION:
+ case PERF_LARGE_RAW_FRACTION:
+ // 100 * N / B
+ doubleValue = 100.0 * (double)current->Data / (double)current->Time;
+ // printf("Display value is (fraction): %f%%\n", doubleValue);
+ return doubleValue;
+
+ case PERF_AVERAGE_TIMER:
+ // ((N1 - N0) / TB) / (B1 - B0)
+ numerator = current->Data - previous->Data;
+ denominator = current->Time - previous->Time;
+ doubleValue = (double)numerator / (double)current->Frequency / (double)denominator;
+ // printf("Display value is (average timer): %f seconds\n", doubleValue);
+ return doubleValue;
+
+ case PERF_ELAPSED_TIME:
+ // (D0 - N0) / F
+ doubleValue = (double)(current->Time - current->Data) / (double)current->Frequency;
+ // printf("Display value is (elapsed time): %f seconds\n", doubleValue);
+ return doubleValue;
+
+ case PERF_COUNTER_TEXT:
+ case PERF_SAMPLE_BASE:
+ case PERF_AVERAGE_BASE:
+ case PERF_COUNTER_MULTI_BASE:
+ case PERF_RAW_BASE:
+ case PERF_COUNTER_NODATA:
+ case PERF_PRECISION_TIMESTAMP:
+ // printf(" > Non-printing counter type: 0x%08x\n", current->CounterType);
+ return NAN;
+ break;
+
+ default:
+ // printf(" > Unrecognized counter type: 0x%08x\n", current->CounterType);
+ return NAN;
+ break;
+ }
+}
+*/
diff --git a/src/collectors/windows.plugin/perflib-rrd.h b/src/collectors/windows.plugin/perflib-rrd.h
index 0b91de2ec..2347c5b1d 100644
--- a/src/collectors/windows.plugin/perflib-rrd.h
+++ b/src/collectors/windows.plugin/perflib-rrd.h
@@ -1,12 +1,11 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PERFLIB_RRD_H
-#define NETDATA_PERFLIB_RRD_H
-
-#include "perflib.h"
-#include "database/rrd.h"
-
-RRDDIM *perflib_rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divider, COUNTER_DATA *cd);
-collected_number perflib_rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, COUNTER_DATA *cd);
-
-#endif //NETDATA_PERFLIB_RRD_H
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_PERFLIB_RRD_H
+#define NETDATA_PERFLIB_RRD_H
+
+#include "database/rrd.h"
+
+RRDDIM *perflib_rrddim_add(RRDSET *st, const char *id, const char *name, collected_number multiplier, collected_number divider, COUNTER_DATA *cd);
+collected_number perflib_rrddim_set_by_pointer(RRDSET *st, RRDDIM *rd, COUNTER_DATA *cd);
+
+#endif //NETDATA_PERFLIB_RRD_H
diff --git a/src/collectors/windows.plugin/perflib-storage.c b/src/collectors/windows.plugin/perflib-storage.c
index d3b80052f..823ba2c04 100644
--- a/src/collectors/windows.plugin/perflib-storage.c
+++ b/src/collectors/windows.plugin/perflib-storage.c
@@ -1,317 +1,632 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "windows_plugin.h"
-#include "windows-internals.h"
-
-#define _COMMON_PLUGIN_NAME PLUGIN_WINDOWS_NAME
-#define _COMMON_PLUGIN_MODULE_NAME "PerflibStorage"
-#include "../common-contexts/common-contexts.h"
-
-struct logical_disk {
- bool collected_metadata;
-
- STRING *filesystem;
-
- RRDSET *st_disk_space;
- RRDDIM *rd_disk_space_used;
- RRDDIM *rd_disk_space_free;
-
- COUNTER_DATA percentDiskFree;
- // COUNTER_DATA freeMegabytes;
-};
-
-struct physical_disk {
- bool collected_metadata;
-
- STRING *device;
- STRING *mount_point;
-
- ND_DISK_IO disk_io;
- COUNTER_DATA diskReadBytesPerSec;
- COUNTER_DATA diskWriteBytesPerSec;
-
- COUNTER_DATA percentIdleTime;
- COUNTER_DATA percentDiskTime;
- COUNTER_DATA percentDiskReadTime;
- COUNTER_DATA percentDiskWriteTime;
- COUNTER_DATA currentDiskQueueLength;
- COUNTER_DATA averageDiskQueueLength;
- COUNTER_DATA averageDiskReadQueueLength;
- COUNTER_DATA averageDiskWriteQueueLength;
- COUNTER_DATA averageDiskSecondsPerTransfer;
- COUNTER_DATA averageDiskSecondsPerRead;
- COUNTER_DATA averageDiskSecondsPerWrite;
- COUNTER_DATA diskTransfersPerSec;
- COUNTER_DATA diskReadsPerSec;
- COUNTER_DATA diskWritesPerSec;
- COUNTER_DATA diskBytesPerSec;
- COUNTER_DATA averageDiskBytesPerTransfer;
- COUNTER_DATA averageDiskBytesPerRead;
- COUNTER_DATA averageDiskBytesPerWrite;
- COUNTER_DATA splitIoPerSec;
-};
-
-struct physical_disk system_physical_total = {
- .collected_metadata = true,
-};
-
-void dict_logical_disk_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
- struct logical_disk *ld = value;
-
- ld->percentDiskFree.key = "% Free Space";
- // ld->freeMegabytes.key = "Free Megabytes";
-}
-
-void initialize_physical_disk(struct physical_disk *pd) {
- pd->percentIdleTime.key = "% Idle Time";
- pd->percentDiskTime.key = "% Disk Time";
- pd->percentDiskReadTime.key = "% Disk Read Time";
- pd->percentDiskWriteTime.key = "% Disk Write Time";
- pd->currentDiskQueueLength.key = "Current Disk Queue Length";
- pd->averageDiskQueueLength.key = "Avg. Disk Queue Length";
- pd->averageDiskReadQueueLength.key = "Avg. Disk Read Queue Length";
- pd->averageDiskWriteQueueLength.key = "Avg. Disk Write Queue Length";
- pd->averageDiskSecondsPerTransfer.key = "Avg. Disk sec/Transfer";
- pd->averageDiskSecondsPerRead.key = "Avg. Disk sec/Read";
- pd->averageDiskSecondsPerWrite.key = "Avg. Disk sec/Write";
- pd->diskTransfersPerSec.key = "Disk Transfers/sec";
- pd->diskReadsPerSec.key = "Disk Reads/sec";
- pd->diskWritesPerSec.key = "Disk Writes/sec";
- pd->diskBytesPerSec.key = "Disk Bytes/sec";
- pd->diskReadBytesPerSec.key = "Disk Read Bytes/sec";
- pd->diskWriteBytesPerSec.key = "Disk Write Bytes/sec";
- pd->averageDiskBytesPerTransfer.key = "Avg. Disk Bytes/Transfer";
- pd->averageDiskBytesPerRead.key = "Avg. Disk Bytes/Read";
- pd->averageDiskBytesPerWrite.key = "Avg. Disk Bytes/Write";
- pd->splitIoPerSec.key = "Split IO/Sec";
-}
-
-void dict_physical_disk_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
- struct physical_disk *pd = value;
- initialize_physical_disk(pd);
-}
-
-static DICTIONARY *logicalDisks = NULL, *physicalDisks = NULL;
-static void initialize(void) {
- initialize_physical_disk(&system_physical_total);
-
- logicalDisks = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
- DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct logical_disk));
-
- dictionary_register_insert_callback(logicalDisks, dict_logical_disk_insert_cb, NULL);
-
- physicalDisks = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
- DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct physical_disk));
-
- dictionary_register_insert_callback(physicalDisks, dict_physical_disk_insert_cb, NULL);
-}
-
-static STRING *getFileSystemType(const char* diskName) {
- if (!diskName || !*diskName) return NULL;
-
- char fileSystemNameBuffer[128] = {0}; // Buffer for file system name
- char pathBuffer[256] = {0}; // Path buffer to accommodate different formats
- DWORD serialNumber = 0;
- DWORD maxComponentLength = 0;
- DWORD fileSystemFlags = 0;
- BOOL success;
-
- // Check if the input is likely a drive letter (e.g., "C:")
- if (isalpha((uint8_t)diskName[0]) && diskName[1] == ':' && diskName[2] == '\0')
- snprintf(pathBuffer, sizeof(pathBuffer), "%s\\", diskName); // Format as "C:\\"
- else
- // Assume it's a Volume GUID path or a device path
- snprintf(pathBuffer, sizeof(pathBuffer), "\\\\.\\%s", diskName); // Format as "\\.\HarddiskVolume1"
-
- // Attempt to get the volume information
- success = GetVolumeInformation(
- pathBuffer, // Path to the disk
- NULL, // We don't need the volume name
- 0, // Size of volume name buffer is 0
- &serialNumber, // Volume serial number
- &maxComponentLength, // Maximum component length
- &fileSystemFlags, // File system flags
- fileSystemNameBuffer, // File system name buffer
- sizeof(fileSystemNameBuffer) // Size of file system name buffer
- );
-
- if (success && fileSystemNameBuffer[0]) {
- char *s = fileSystemNameBuffer;
- while(*s) { *s = tolower((uint8_t)*s); s++; }
- return string_strdupz(fileSystemNameBuffer); // Duplicate the file system name
- }
- else
- return NULL;
-}
-
-static bool do_logical_disk(PERF_DATA_BLOCK *pDataBlock, int update_every) {
- DICTIONARY *dict = logicalDisks;
-
- PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "LogicalDisk");
- if(!pObjectType) return false;
-
- PERF_INSTANCE_DEFINITION *pi = NULL;
- for(LONG i = 0; i < pObjectType->NumInstances ; i++) {
- pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
- if(!pi) break;
-
- if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
- strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
-
- if(strcasecmp(windows_shared_buffer, "_Total") == 0)
- continue;
-
- struct logical_disk *d = dictionary_set(dict, windows_shared_buffer, NULL, sizeof(*d));
-
- if(!d->collected_metadata) {
- d->filesystem = getFileSystemType(windows_shared_buffer);
- d->collected_metadata = true;
- }
-
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskFree);
- // perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->freeMegabytes);
-
- if(!d->st_disk_space) {
- d->st_disk_space = rrdset_create_localhost(
- "disk_space"
- , windows_shared_buffer, NULL
- , windows_shared_buffer, "disk.space"
- , "Disk Space Usage"
- , "GiB"
- , PLUGIN_WINDOWS_NAME
- , "PerflibStorage"
- , NETDATA_CHART_PRIO_DISKSPACE_SPACE
- , update_every
- , RRDSET_TYPE_STACKED
- );
-
- rrdlabels_add(d->st_disk_space->rrdlabels, "mount_point", windows_shared_buffer, RRDLABEL_SRC_AUTO);
- // rrdlabels_add(d->st->rrdlabels, "mount_root", name, RRDLABEL_SRC_AUTO);
-
- if(d->filesystem)
- rrdlabels_add(d->st_disk_space->rrdlabels, "filesystem", string2str(d->filesystem), RRDLABEL_SRC_AUTO);
-
- d->rd_disk_space_free = rrddim_add(d->st_disk_space, "avail", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- d->rd_disk_space_used = rrddim_add(d->st_disk_space, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
- }
-
- // percentDiskFree has the free space in Data and the size of the disk in Time, in MiB.
- rrddim_set_by_pointer(d->st_disk_space, d->rd_disk_space_free, (collected_number)d->percentDiskFree.current.Data);
- rrddim_set_by_pointer(d->st_disk_space, d->rd_disk_space_used, (collected_number)(d->percentDiskFree.current.Time - d->percentDiskFree.current.Data));
- rrdset_done(d->st_disk_space);
- }
-
- return true;
-}
-
-static void physical_disk_labels(RRDSET *st, void *data) {
- struct physical_disk *d = data;
-
- if(d->device)
- rrdlabels_add(st->rrdlabels, "device", string2str(d->device), RRDLABEL_SRC_AUTO);
-
- if (d->mount_point)
- rrdlabels_add(st->rrdlabels, "mount_point", string2str(d->mount_point), RRDLABEL_SRC_AUTO);
-}
-
-static bool do_physical_disk(PERF_DATA_BLOCK *pDataBlock, int update_every) {
- DICTIONARY *dict = physicalDisks;
-
- PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "PhysicalDisk");
- if(!pObjectType) return false;
-
- PERF_INSTANCE_DEFINITION *pi = NULL;
- for (LONG i = 0; i < pObjectType->NumInstances; i++) {
- pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
- if (!pi)
- break;
-
- if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
- strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
-
- char *device = windows_shared_buffer;
- char *mount_point = NULL;
-
- if((mount_point = strchr(device, ' '))) {
- *mount_point = '\0';
- mount_point++;
- }
-
- struct physical_disk *d;
- bool is_system;
- if (strcasecmp(windows_shared_buffer, "_Total") == 0) {
- d = &system_physical_total;
- is_system = true;
- }
- else {
- d = dictionary_set(dict, device, NULL, sizeof(*d));
- is_system = false;
- }
-
- if (!d->collected_metadata) {
- // TODO collect metadata - device_type, serial, id
- d->device = string_strdupz(device);
- d->mount_point = string_strdupz(mount_point);
- d->collected_metadata = true;
- }
-
- if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskReadBytesPerSec) &&
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskWriteBytesPerSec)) {
- if(is_system)
- common_system_io(d->diskReadBytesPerSec.current.Data, d->diskWriteBytesPerSec.current.Data, update_every);
- else
- common_disk_io(
- &d->disk_io,
- device,
- NULL,
- d->diskReadBytesPerSec.current.Data,
- d->diskWriteBytesPerSec.current.Data,
- update_every,
- physical_disk_labels,
- d);
- }
-
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentIdleTime);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskTime);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskReadTime);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskWriteTime);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->currentDiskQueueLength);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskQueueLength);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskReadQueueLength);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskWriteQueueLength);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerTransfer);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerRead);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerWrite);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskTransfersPerSec);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskReadsPerSec);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskWritesPerSec);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskBytesPerSec);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerTransfer);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerRead);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerWrite);
- perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->splitIoPerSec);
- }
-
- return true;
-}
-
-int do_PerflibStorage(int update_every, usec_t dt __maybe_unused) {
- static bool initialized = false;
-
- if(unlikely(!initialized)) {
- initialize();
- initialized = true;
- }
-
- DWORD id = RegistryFindIDByName("LogicalDisk");
- if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
- return -1;
-
- PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
- if(!pDataBlock) return -1;
-
- do_logical_disk(pDataBlock, update_every);
- do_physical_disk(pDataBlock, update_every);
-
- return 0;
-}
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+#define _COMMON_PLUGIN_NAME PLUGIN_WINDOWS_NAME
+#define _COMMON_PLUGIN_MODULE_NAME "PerflibStorage"
+#include "../common-contexts/common-contexts.h"
+#include "libnetdata/os/windows-wmi/windows-wmi.h"
+
+struct logical_disk {
+ usec_t last_collected;
+ bool collected_metadata;
+
+ UINT DriveType;
+ DWORD SerialNumber;
+ bool readonly;
+
+ STRING *filesystem;
+
+ RRDSET *st_disk_space;
+ RRDDIM *rd_disk_space_used;
+ RRDDIM *rd_disk_space_free;
+
+ COUNTER_DATA percentDiskFree;
+ // COUNTER_DATA freeMegabytes;
+};
+
+struct physical_disk {
+ usec_t last_collected;
+ bool collected_metadata;
+
+ STRING *device;
+ STRING *mount_point;
+ STRING *manufacturer;
+ STRING *model;
+ STRING *media_type;
+ STRING *name;
+ STRING *device_id;
+
+ ND_DISK_IO disk_io;
+ // COUNTER_DATA diskBytesPerSec;
+ COUNTER_DATA diskReadBytesPerSec;
+ COUNTER_DATA diskWriteBytesPerSec;
+
+ ND_DISK_OPS disk_ops;
+ // COUNTER_DATA diskTransfersPerSec;
+ COUNTER_DATA diskReadsPerSec;
+ COUNTER_DATA diskWritesPerSec;
+
+ ND_DISK_UTIL disk_util;
+ COUNTER_DATA percentIdleTime;
+
+ ND_DISK_BUSY disk_busy;
+ COUNTER_DATA percentDiskTime;
+
+ ND_DISK_IOTIME disk_iotime;
+ COUNTER_DATA percentDiskReadTime;
+ COUNTER_DATA percentDiskWriteTime;
+
+ ND_DISK_QOPS disk_qops;
+ COUNTER_DATA currentDiskQueueLength;
+ // COUNTER_DATA averageDiskQueueLength;
+ // COUNTER_DATA averageDiskReadQueueLength;
+ // COUNTER_DATA averageDiskWriteQueueLength;
+
+ ND_DISK_AWAIT disk_await;
+ COUNTER_DATA averageDiskSecondsPerRead;
+ COUNTER_DATA averageDiskSecondsPerWrite;
+
+ ND_DISK_SVCTM disk_svctm;
+ COUNTER_DATA averageDiskSecondsPerTransfer;
+
+ ND_DISK_AVGSZ disk_avgsz;
+ //COUNTER_DATA averageDiskBytesPerTransfer;
+ COUNTER_DATA averageDiskBytesPerRead;
+ COUNTER_DATA averageDiskBytesPerWrite;
+
+ COUNTER_DATA splitIoPerSec;
+ RRDSET *st_split;
+ RRDDIM *rd_split;
+};
+
+struct physical_disk system_physical_total = {
+ .collected_metadata = true,
+};
+
+static void dict_logical_disk_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct logical_disk *d = value;
+
+ d->percentDiskFree.key = "% Free Space";
+ // d->freeMegabytes.key = "Free Megabytes";
+}
+
+static void logical_disk_cleanup(struct logical_disk *d) {
+ rrdset_is_obsolete___safe_from_collector_thread(d->st_disk_space);
+}
+
+static void physical_disk_initialize(struct physical_disk *d) {
+ d->percentIdleTime.key = "% Idle Time";
+ d->percentDiskTime.key = "% Disk Time";
+ d->percentDiskReadTime.key = "% Disk Read Time";
+ d->percentDiskWriteTime.key = "% Disk Write Time";
+ d->currentDiskQueueLength.key = "Current Disk Queue Length";
+ // d->averageDiskQueueLength.key = "Avg. Disk Queue Length";
+ // d->averageDiskReadQueueLength.key = "Avg. Disk Read Queue Length";
+ // d->averageDiskWriteQueueLength.key = "Avg. Disk Write Queue Length";
+ d->averageDiskSecondsPerTransfer.key = "Avg. Disk sec/Transfer";
+ d->averageDiskSecondsPerRead.key = "Avg. Disk sec/Read";
+ d->averageDiskSecondsPerWrite.key = "Avg. Disk sec/Write";
+ // d->diskTransfersPerSec.key = "Disk Transfers/sec";
+ d->diskReadsPerSec.key = "Disk Reads/sec";
+ d->diskWritesPerSec.key = "Disk Writes/sec";
+ // d->diskBytesPerSec.key = "Disk Bytes/sec";
+ d->diskReadBytesPerSec.key = "Disk Read Bytes/sec";
+ d->diskWriteBytesPerSec.key = "Disk Write Bytes/sec";
+ // d->averageDiskBytesPerTransfer.key = "Avg. Disk Bytes/Transfer";
+ d->averageDiskBytesPerRead.key = "Avg. Disk Bytes/Read";
+ d->averageDiskBytesPerWrite.key = "Avg. Disk Bytes/Write";
+ d->splitIoPerSec.key = "Split IO/Sec";
+}
+
+static void physical_disk_cleanup(struct physical_disk *d) {
+ string_freez(d->device);
+ string_freez(d->mount_point);
+ string_freez(d->manufacturer);
+ string_freez(d->model);
+ string_freez(d->media_type);
+ string_freez(d->name);
+ string_freez(d->device_id);
+
+ rrdset_is_obsolete___safe_from_collector_thread(d->disk_io.st_io);
+ rrdset_is_obsolete___safe_from_collector_thread(d->disk_ops.st_ops);
+ rrdset_is_obsolete___safe_from_collector_thread(d->disk_util.st_util);
+ rrdset_is_obsolete___safe_from_collector_thread(d->disk_busy.st_busy);
+ rrdset_is_obsolete___safe_from_collector_thread(d->disk_iotime.st_iotime);
+ rrdset_is_obsolete___safe_from_collector_thread(d->disk_qops.st_qops);
+ rrdset_is_obsolete___safe_from_collector_thread(d->disk_await.st_await);
+ rrdset_is_obsolete___safe_from_collector_thread(d->disk_svctm.st_svctm);
+ rrdset_is_obsolete___safe_from_collector_thread(d->disk_avgsz.st_avgsz);
+ rrdset_is_obsolete___safe_from_collector_thread(d->st_split);
+}
+
+void dict_physical_disk_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct physical_disk *pd = value;
+ physical_disk_initialize(pd);
+}
+
+static DICTIONARY *logicalDisks = NULL, *physicalDisks = NULL;
+static void initialize(void) {
+ physical_disk_initialize(&system_physical_total);
+
+ logicalDisks = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
+ DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct logical_disk));
+
+ dictionary_register_insert_callback(logicalDisks, dict_logical_disk_insert_cb, NULL);
+
+ physicalDisks = dictionary_create_advanced(DICT_OPTION_DONT_OVERWRITE_VALUE |
+ DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct physical_disk));
+
+ dictionary_register_insert_callback(physicalDisks, dict_physical_disk_insert_cb, NULL);
+}
+
+static STRING *getFileSystemType(struct logical_disk *d, const char* diskName) {
+ if (!diskName || !*diskName) return NULL;
+
+ char fileSystemNameBuffer[128] = {0}; // Buffer for file system name
+ char pathBuffer[256] = {0}; // Path buffer to accommodate different formats
+ DWORD serialNumber = 0;
+ DWORD maxComponentLength = 0;
+ DWORD fileSystemFlags = 0;
+ BOOL success;
+
+ // Check if the input is likely a drive letter (e.g., "C:")
+ if (isalpha((uint8_t)diskName[0]) && diskName[1] == ':' && diskName[2] == '\0')
+ snprintf(pathBuffer, sizeof(pathBuffer), "%s\\", diskName); // Format as "C:\"
+ else
+ // Assume it's a Volume GUID path or a device path
+ snprintf(pathBuffer, sizeof(pathBuffer), "\\\\.\\%s\\", diskName); // Format as "\\.\HarddiskVolume1\"
+
+ d->DriveType = GetDriveTypeA(pathBuffer);
+
+ // Attempt to get the volume information
+ success = GetVolumeInformationA(
+ pathBuffer, // Path to the disk
+ NULL, // We don't need the volume name
+ 0, // Size of volume name buffer is 0
+ &serialNumber, // Volume serial number
+ &maxComponentLength, // Maximum component length
+ &fileSystemFlags, // File system flags
+ fileSystemNameBuffer, // File system name buffer
+ sizeof(fileSystemNameBuffer) // Size of file system name buffer
+ );
+
+ if(success) {
+ d->readonly = fileSystemFlags & FILE_READ_ONLY_VOLUME;
+ d->SerialNumber = serialNumber;
+
+ if (fileSystemNameBuffer[0]) {
+ char *s = fileSystemNameBuffer;
+ while (*s) {
+ *s = tolower((uint8_t) *s);
+ s++;
+ }
+ return string_strdupz(fileSystemNameBuffer); // Duplicate the file system name
+ }
+ }
+ return NULL;
+}
+
+static const char *drive_type_to_str(UINT type) {
+ switch(type) {
+ default:
+ case 0: return "unknown";
+ case 1: return "norootdir";
+ case 2: return "removable";
+ case 3: return "fixed";
+ case 4: return "remote";
+ case 5: return "cdrom";
+ case 6: return "ramdisk";
+ }
+}
+
+static bool do_logical_disk(PERF_DATA_BLOCK *pDataBlock, int update_every, usec_t now_ut) {
+ DICTIONARY *dict = logicalDisks;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "LogicalDisk");
+ if(!pObjectType) return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for(LONG i = 0; i < pObjectType->NumInstances ; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if(!pi) break;
+
+ if(!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ if(strcasecmp(windows_shared_buffer, "_Total") == 0)
+ continue;
+
+ struct logical_disk *d = dictionary_set(dict, windows_shared_buffer, NULL, sizeof(*d));
+ d->last_collected = now_ut;
+
+ if(!d->collected_metadata) {
+ d->filesystem = getFileSystemType(d, windows_shared_buffer);
+ d->collected_metadata = true;
+ }
+
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskFree);
+ // perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->freeMegabytes);
+
+ if(!d->st_disk_space) {
+ d->st_disk_space = rrdset_create_localhost(
+ "disk_space"
+ , windows_shared_buffer
+ , NULL
+ , windows_shared_buffer
+ , "disk.space"
+ , "Disk Space Usage"
+ , "GiB"
+ , PLUGIN_WINDOWS_NAME
+ , "PerflibStorage"
+ , NETDATA_CHART_PRIO_DISKSPACE_SPACE
+ , update_every
+ , RRDSET_TYPE_STACKED
+ );
+
+ rrdlabels_add(d->st_disk_space->rrdlabels, "mount_point", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ rrdlabels_add(d->st_disk_space->rrdlabels, "drive_type", drive_type_to_str(d->DriveType), RRDLABEL_SRC_AUTO);
+ rrdlabels_add(d->st_disk_space->rrdlabels, "filesystem", d->filesystem ? string2str(d->filesystem) : "unknown", RRDLABEL_SRC_AUTO);
+ rrdlabels_add(d->st_disk_space->rrdlabels, "rw_mode", d->readonly ? "ro" : "rw", RRDLABEL_SRC_AUTO);
+
+ {
+ char buf[UINT64_HEX_MAX_LENGTH];
+ print_uint64_hex(buf, d->SerialNumber);
+ rrdlabels_add(d->st_disk_space->rrdlabels, "serial_number", buf, RRDLABEL_SRC_AUTO);
+ }
+
+ d->rd_disk_space_free = rrddim_add(d->st_disk_space, "avail", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ d->rd_disk_space_used = rrddim_add(d->st_disk_space, "used", NULL, 1, 1024, RRD_ALGORITHM_ABSOLUTE);
+ }
+
+ // percentDiskFree has the free space in Data and the size of the disk in Time, in MiB.
+ rrddim_set_by_pointer(d->st_disk_space, d->rd_disk_space_free, (collected_number)d->percentDiskFree.current.Data);
+ rrddim_set_by_pointer(d->st_disk_space, d->rd_disk_space_used, (collected_number)(d->percentDiskFree.current.Time - d->percentDiskFree.current.Data));
+ rrdset_done(d->st_disk_space);
+ }
+
+ // cleanup
+ {
+ struct logical_disk *d;
+ dfe_start_write(dict, d) {
+ if(d->last_collected < now_ut) {
+ logical_disk_cleanup(d);
+ dictionary_del(dict, d_dfe.name);
+ }
+ }
+ dfe_done(d);
+ dictionary_garbage_collect(dict);
+ }
+
+ return true;
+}
+
+static void physical_disk_labels(RRDSET *st, void *data) {
+ struct physical_disk *d = data;
+
+ if(d->device)
+ rrdlabels_add(st->rrdlabels, "device", string2str(d->device), RRDLABEL_SRC_AUTO);
+
+ if (d->mount_point)
+ rrdlabels_add(st->rrdlabels, "mount_point", string2str(d->mount_point), RRDLABEL_SRC_AUTO);
+
+// if (d->manufacturer)
+// rrdlabels_add(st->rrdlabels, "manufacturer", string2str(d->manufacturer), RRDLABEL_SRC_AUTO);
+
+ if (d->model)
+ rrdlabels_add(st->rrdlabels, "model", string2str(d->model), RRDLABEL_SRC_AUTO);
+
+// if (d->media_type)
+// rrdlabels_add(st->rrdlabels, "media_type", string2str(d->media_type), RRDLABEL_SRC_AUTO);
+
+// if (d->name)
+// rrdlabels_add(st->rrdlabels, "name", string2str(d->name), RRDLABEL_SRC_AUTO);
+
+ if (d->device_id)
+ rrdlabels_add(st->rrdlabels, "device_id", string2str(d->device_id), RRDLABEL_SRC_AUTO);
+}
+
+static bool str_is_numeric(const char *s) {
+ while(*s) if(!isdigit((uint8_t)*s++)) return false;
+ return true;
+}
+
+static inline double perflib_average_timer_ms(COUNTER_DATA *d) {
+ if(!d->updated) return 0.0;
+
+ ULONGLONG data1 = d->current.Data;
+ ULONGLONG data0 = d->previous.Data;
+ LONGLONG time1 = d->current.Time;
+ LONGLONG time0 = d->previous.Time;
+ LONGLONG freq1 = d->current.Frequency;
+
+ if(data1 >= data0 && time1 > time0 && time0 && freq1)
+ return ((double)(data1 - data0) / (double)(freq1 / MSEC_PER_SEC)) / (double)(time1 - time0);
+
+ return 0;
+}
+
+static inline uint64_t perflib_average_bulk(COUNTER_DATA *d) {
+ if(!d->updated) return 0;
+
+ ULONGLONG data1 = d->current.Data;
+ ULONGLONG data0 = d->previous.Data;
+ LONGLONG time1 = d->current.Time;
+ LONGLONG time0 = d->previous.Time;
+
+ if(data1 >= data0 && time1 > time0 && time0)
+ return (data1 - data0) / (time1 - time0);
+
+ return 0;
+}
+
+static inline uint64_t perflib_idle_time_percent(COUNTER_DATA *d) {
+ if(!d->updated) return 0.0;
+
+ ULONGLONG data1 = d->current.Data;
+ ULONGLONG data0 = d->previous.Data;
+ LONGLONG time1 = d->current.Time;
+ LONGLONG time0 = d->previous.Time;
+
+ if(data1 >= data0 && time1 > time0 && time0) {
+ uint64_t pcent = 100 * (data1 - data0) / (time1 - time0);
+ return pcent > 100 ? 100 : pcent;
+ }
+
+ return 0;
+}
+
+#define MAX_WMI_DRIVES 100
+static DiskDriveInfoWMI infos[MAX_WMI_DRIVES];
+
+static bool do_physical_disk(PERF_DATA_BLOCK *pDataBlock, int update_every, usec_t now_ut) {
+ DICTIONARY *dict = physicalDisks;
+
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "PhysicalDisk");
+ if(!pObjectType) return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ int device_index = -1;
+ char *device = windows_shared_buffer;
+ char mount_point[128]; mount_point[0] = '\0';
+
+ struct physical_disk *d;
+ bool is_system;
+ if (strcasecmp(windows_shared_buffer, "_Total") == 0) {
+ d = &system_physical_total;
+ is_system = true;
+ }
+ else {
+ char *space;
+ if((space = strchr(windows_shared_buffer, ' '))) {
+ *space++ = '\0';
+ strncpyz(mount_point, space, sizeof(mount_point) - 1);
+ }
+
+ if(str_is_numeric(windows_shared_buffer)) {
+ device_index = str2ull(device, NULL);
+ snprintfz(windows_shared_buffer, sizeof(windows_shared_buffer), "Disk %d", device_index);
+ device = windows_shared_buffer;
+ }
+
+ d = dictionary_set(dict, device, NULL, sizeof(*d));
+ is_system = false;
+ }
+ d->last_collected = now_ut;
+
+ if (!d->collected_metadata) {
+ if(!is_system && device_index != -1) {
+ size_t infoCount = GetDiskDriveInfo(infos, _countof(infos));
+ for(size_t k = 0; k < infoCount ; k++) {
+ if(infos[k].Index != device_index)
+ continue;
+
+ d->manufacturer = string_strdupz(infos[k].Manufacturer);
+ d->model = string_strdupz(infos[k].Model);
+ d->media_type = string_strdupz(infos[k].MediaType);
+ d->name = string_strdupz(infos[k].Name);
+ d->device_id = string_strdupz(infos[k].DeviceID);
+
+ break;
+ }
+ }
+
+ d->device = string_strdupz(device);
+ d->mount_point = string_strdupz(mount_point);
+ d->collected_metadata = true;
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskReadBytesPerSec) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskWriteBytesPerSec)) {
+ if(is_system)
+ common_system_io(
+ d->diskReadBytesPerSec.current.Data,
+ d->diskWriteBytesPerSec.current.Data,
+ update_every);
+ else
+ common_disk_io(
+ &d->disk_io,
+ device,
+ NULL,
+ d->diskReadBytesPerSec.current.Data,
+ d->diskWriteBytesPerSec.current.Data,
+ update_every,
+ physical_disk_labels,
+ d);
+ }
+
+ if(is_system) continue;
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskReadsPerSec) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->diskWritesPerSec)) {
+
+ common_disk_ops(
+ &d->disk_ops,
+ device,
+ NULL,
+ d->diskReadsPerSec.current.Data,
+ d->diskWritesPerSec.current.Data,
+ update_every,
+ physical_disk_labels,
+ d);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentIdleTime)) {
+ common_disk_util(
+ &d->disk_util,
+ device,
+ NULL,
+ 100 - perflib_idle_time_percent(&d->percentIdleTime),
+ update_every,
+ physical_disk_labels,
+ d);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskTime)) {
+ common_disk_busy(
+ &d->disk_busy,
+ device,
+ NULL,
+ d->percentDiskTime.current.Data / NS100_PER_MS,
+ update_every,
+ physical_disk_labels,
+ d);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskReadTime) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->percentDiskWriteTime)) {
+
+ common_disk_iotime(
+ &d->disk_iotime,
+ device,
+ NULL,
+ d->percentDiskReadTime.current.Data / NS100_PER_MS,
+ d->percentDiskWriteTime.current.Data / NS100_PER_MS,
+ update_every,
+ physical_disk_labels,
+ d);
+ }
+
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->currentDiskQueueLength)) {
+ common_disk_qops(
+ &d->disk_qops,
+ device,
+ NULL,
+ d->currentDiskQueueLength.current.Data,
+ update_every,
+ physical_disk_labels,
+ d);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerRead) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerWrite)) {
+
+ common_disk_await(
+ &d->disk_await,
+ device,
+ NULL,
+ perflib_average_timer_ms(&d->averageDiskSecondsPerRead),
+ perflib_average_timer_ms(&d->averageDiskSecondsPerWrite),
+ update_every,
+ physical_disk_labels,
+ d);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskSecondsPerTransfer)) {
+ common_disk_svctm(
+ &d->disk_svctm,
+ device,
+ NULL,
+ perflib_average_timer_ms(&d->averageDiskSecondsPerTransfer),
+ update_every,
+ physical_disk_labels,
+ d);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerRead) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->averageDiskBytesPerWrite)) {
+
+ common_disk_avgsz(
+ &d->disk_avgsz,
+ device,
+ NULL,
+ perflib_average_bulk(&d->averageDiskBytesPerRead),
+ perflib_average_bulk(&d->averageDiskBytesPerWrite),
+ update_every,
+ physical_disk_labels,
+ d);
+ }
+
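+ // Split IO/Sec counts I/O requests that the OS split into multiple disk operations (typically due to fragmentation or request size)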
+ if(perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &d->splitIoPerSec)) {
+ if (!d->st_split) {
+ d->st_split = rrdset_create_localhost(
+ "disk_split",
+ device,
+ NULL,
+ "iops",
+ "disk.split",
+ "Split I/O Operations",
+ "operations/s",
+ _COMMON_PLUGIN_NAME,
+ _COMMON_PLUGIN_MODULE_NAME,
+ NETDATA_CHART_PRIO_DISK_SPLIT,
+ update_every,
+ RRDSET_TYPE_LINE
+ );
+
+ d->rd_split = rrddim_add(d->st_split, "split", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ physical_disk_labels(d->st_split, d);
+ }
+
+ rrddim_set_by_pointer(d->st_split, d->rd_split, d->splitIoPerSec.current.Data);
+ rrdset_done(d->st_split);
+ }
+ }
+
+ // cleanup: remove disks that were not seen during this collection
+ {
+ struct physical_disk *d;
+ dfe_start_write(dict, d) {
+ if(d->last_collected < now_ut) {
+ physical_disk_cleanup(d);
+ dictionary_del(dict, d_dfe.name);
+ }
+ }
+ dfe_done(d);
+ dictionary_garbage_collect(dict);
+ }
+
+ return true;
+}
+
+int do_PerflibStorage(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if(unlikely(!initialized)) {
+ initialize();
+ initialized = true;
+ }
+
+ DWORD id = RegistryFindIDByName("LogicalDisk");
+ if(id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ return -1;
+
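+ // a perflib query for "LogicalDisk" typically returns a data block that also contains the "PhysicalDisk" object, so both collectors below reuse the same snapshot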
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if(!pDataBlock) return -1;
+
+ usec_t now_ut = now_monotonic_usec();
+ do_logical_disk(pDataBlock, update_every, now_ut);
+ do_physical_disk(pDataBlock, update_every, now_ut);
+
+ return 0;
+}
diff --git a/src/collectors/windows.plugin/perflib-thermalzone.c b/src/collectors/windows.plugin/perflib-thermalzone.c
new file mode 100644
index 000000000..f85ba019f
--- /dev/null
+++ b/src/collectors/windows.plugin/perflib-thermalzone.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+struct thermal_zone {
+ RRDSET *st;
+ RRDDIM *rd;
+
+ COUNTER_DATA thermalZoneTemperature;
+};
+
+static inline void initialize_thermal_zone_keys(struct thermal_zone *p) {
+ p->thermalZoneTemperature.key = "Temperature";
+}
+
+void dict_thermal_zone_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct thermal_zone *p = value;
+ initialize_thermal_zone_keys(p);
+}
+
+static DICTIONARY *thermal_zones = NULL;
+
+static void initialize(void) {
+ thermal_zones = dictionary_create_advanced(
+ DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct thermal_zone));
+
+ dictionary_register_insert_callback(thermal_zones, dict_thermal_zone_insert_cb, NULL);
+}
+
+static bool do_thermal_zones(PERF_DATA_BLOCK *pDataBlock, int update_every) {
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Thermal Zone Information");
+ if (!pObjectType)
+ return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
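+ // one dictionary entry per thermal zone; the insert callback initializes the counter key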
+ struct thermal_zone *p = dictionary_set(thermal_zones, windows_shared_buffer, NULL, sizeof(*p));
+
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->thermalZoneTemperature);
+
+ // https://learn.microsoft.com/en-us/windows-hardware/design/device-experiences/design-guide
+ if (!p->st) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ snprintfz(id, RRD_ID_LENGTH_MAX, "thermalzone_%s_temperature", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st = rrdset_create_localhost(
+ "system",
+ id,
+ NULL,
+ "thermalzone",
+ "system.thermalzone_temperature",
+ "Thermal zone temperature",
+ "Celsius",
+ PLUGIN_WINDOWS_NAME,
+ "ThermalZone",
+ NETDATA_CHART_PRIO_WINDOWS_THERMAL_ZONES,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd = rrddim_add(p->st, id, "temperature", 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st->rrdlabels, "thermalzone", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ // the Temperature counter reports Kelvin, so convert to Celsius before plotting
+ NETDATA_DOUBLE kTemperature = (NETDATA_DOUBLE)p->thermalZoneTemperature.current.Data;
+ kTemperature -= 273.15;
+
+ rrddim_set_by_pointer(p->st, p->rd, (collected_number)kTemperature);
+ rrdset_done(p->st);
+ }
+
+ return true;
+}
+
+int do_PerflibThermalZone(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if (unlikely(!initialized)) {
+ initialize();
+ initialized = true;
+ }
+
+ DWORD id = RegistryFindIDByName("Thermal Zone Information");
+ if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ return -1;
+
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if (!pDataBlock)
+ return -1;
+
+ do_thermal_zones(pDataBlock, update_every);
+
+ return 0;
+}
diff --git a/src/collectors/windows.plugin/perflib-web-service.c b/src/collectors/windows.plugin/perflib-web-service.c
new file mode 100644
index 000000000..159f6e0ee
--- /dev/null
+++ b/src/collectors/windows.plugin/perflib-web-service.c
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "windows_plugin.h"
+#include "windows-internals.h"
+
+struct web_service {
+ RRDSET *st_request_rate;
+ RRDDIM *rd_request_rate;
+
+ RRDSET *st_request_by_type_rate;
+ RRDDIM *rd_request_options_rate;
+ RRDDIM *rd_request_get_rate;
+ RRDDIM *rd_request_post_rate;
+ RRDDIM *rd_request_head_rate;
+ RRDDIM *rd_request_put_rate;
+ RRDDIM *rd_request_delete_rate;
+ RRDDIM *rd_request_trace_rate;
+ RRDDIM *rd_request_move_rate;
+ RRDDIM *rd_request_copy_rate;
+ RRDDIM *rd_request_mkcol_rate;
+ RRDDIM *rd_request_propfind_rate;
+ RRDDIM *rd_request_proppatch_rate;
+ RRDDIM *rd_request_search_rate;
+ RRDDIM *rd_request_lock_rate;
+ RRDDIM *rd_request_unlock_rate;
+ RRDDIM *rd_request_other_rate;
+
+ RRDSET *st_traffic;
+ RRDDIM *rd_traffic_received;
+ RRDDIM *rd_traffic_sent;
+
+ RRDSET *st_file_transfer;
+ RRDDIM *rd_files_received;
+ RRDDIM *rd_files_sent;
+
+ RRDSET *st_curr_connections;
+ RRDDIM *rd_curr_connections;
+
+ RRDSET *st_connections_attemps;
+ RRDDIM *rd_connections_attemps;
+
+ RRDSET *st_user_count;
+ RRDDIM *rd_user_anonymous;
+ RRDDIM *rd_user_nonanonymous;
+
+ RRDSET *st_isapi_extension_request_count;
+ RRDDIM *rd_isapi_extension_request_count;
+
+ RRDSET *st_isapi_extension_request_rate;
+ RRDDIM *rd_isapi_extension_request_rate;
+
+ RRDSET *st_error_rate;
+ RRDDIM *rd_error_rate_locked;
+ RRDDIM *rd_error_rate_not_found;
+
+ RRDSET *st_logon_attemps;
+ RRDDIM *rd_logon_attemps;
+
+ RRDSET *st_service_uptime;
+ RRDDIM *rd_service_uptime;
+
+ COUNTER_DATA IISCurrentAnonymousUser;
+ COUNTER_DATA IISCurrentNonAnonymousUsers;
+ COUNTER_DATA IISCurrentConnections;
+ COUNTER_DATA IISCurrentISAPIExtRequests;
+ COUNTER_DATA IISUptime;
+
+ COUNTER_DATA IISReceivedBytesTotal;
+ COUNTER_DATA IISSentBytesTotal;
+ COUNTER_DATA IISIPAPIExtRequestsTotal;
+ COUNTER_DATA IISConnAttemptsAllInstancesTotal;
+ COUNTER_DATA IISFilesReceivedTotal;
+ COUNTER_DATA IISFilesSentTotal;
+ COUNTER_DATA IISLogonAttemptsTotal;
+ COUNTER_DATA IISLockedErrorsTotal;
+ COUNTER_DATA IISNotFoundErrorsTotal;
+
+ COUNTER_DATA IISRequestsOptions;
+ COUNTER_DATA IISRequestsGet;
+ COUNTER_DATA IISRequestsPost;
+ COUNTER_DATA IISRequestsHead;
+ COUNTER_DATA IISRequestsPut;
+ COUNTER_DATA IISRequestsDelete;
+ COUNTER_DATA IISRequestsTrace;
+ COUNTER_DATA IISRequestsMove;
+ COUNTER_DATA IISRequestsCopy;
+ COUNTER_DATA IISRequestsMkcol;
+ COUNTER_DATA IISRequestsPropfind;
+ COUNTER_DATA IISRequestsProppatch;
+ COUNTER_DATA IISRequestsSearch;
+ COUNTER_DATA IISRequestsLock;
+ COUNTER_DATA IISRequestsUnlock;
+ COUNTER_DATA IISRequestsOther;
+};
+
+static inline void initialize_web_service_keys(struct web_service *p) {
+ p->IISCurrentAnonymousUser.key = "Current Anonymous Users";
+ p->IISCurrentNonAnonymousUsers.key = "Current NonAnonymous Users";
+ p->IISCurrentConnections.key = "Current Connections";
+ p->IISCurrentISAPIExtRequests.key = "Current ISAPI Extension Requests";
+ p->IISUptime.key = "Service Uptime";
+
+ p->IISReceivedBytesTotal.key = "Total Bytes Received";
+ p->IISSentBytesTotal.key = "Total Bytes Sent";
+ p->IISIPAPIExtRequestsTotal.key = "Total ISAPI Extension Requests";
+ p->IISConnAttemptsAllInstancesTotal.key = "Total Connection Attempts (all instances)";
+ p->IISFilesReceivedTotal.key = "Total Files Received";
+ p->IISFilesSentTotal.key = "Total Files Sent";
+ p->IISLogonAttemptsTotal.key = "Total Logon Attempts";
+ p->IISLockedErrorsTotal.key = "Total Locked Errors";
+ p->IISNotFoundErrorsTotal.key = "Total Not Found Errors";
+
+ p->IISRequestsOptions.key = "Options Requests/sec";
+ p->IISRequestsGet.key = "Get Requests/sec";
+ p->IISRequestsPost.key = "Post Requests/sec";
+ p->IISRequestsHead.key = "Head Requests/sec";
+ p->IISRequestsPut.key = "Put Requests/sec";
+ p->IISRequestsDelete.key = "Delete Requests/sec";
+ p->IISRequestsTrace.key = "Trace Requests/sec";
+ p->IISRequestsMove.key = "Move Requests/sec";
+ p->IISRequestsCopy.key = "Copy Requests/sec";
+ p->IISRequestsMkcol.key = "Mkcol Requests/sec";
+ p->IISRequestsPropfind.key = "Propfind Requests/sec";
+ p->IISRequestsProppatch.key = "Proppatch Requests/sec";
+ p->IISRequestsSearch.key = "Search Requests/sec";
+ p->IISRequestsLock.key = "Lock Requests/sec";
+ p->IISRequestsUnlock.key = "Unlock Requests/sec";
+ p->IISRequestsOther.key = "Other Request Methods/sec";
+}
+
+void dict_web_service_insert_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ struct web_service *p = value;
+ initialize_web_service_keys(p);
+}
+
+static DICTIONARY *web_services = NULL;
+
+static void initialize(void) {
+ web_services = dictionary_create_advanced(
+ DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE, NULL, sizeof(struct web_service));
+
+ dictionary_register_insert_callback(web_services, dict_web_service_insert_cb, NULL);
+}
+
+static bool do_web_services(PERF_DATA_BLOCK *pDataBlock, int update_every) {
+ char id[RRD_ID_LENGTH_MAX + 1];
+ PERF_OBJECT_TYPE *pObjectType = perflibFindObjectTypeByName(pDataBlock, "Web Service");
+ if (!pObjectType)
+ return false;
+
+ PERF_INSTANCE_DEFINITION *pi = NULL;
+ for (LONG i = 0; i < pObjectType->NumInstances; i++) {
+ pi = perflibForEachInstance(pDataBlock, pObjectType, pi);
+ if (!pi)
+ break;
+
+ if (!getInstanceName(pDataBlock, pObjectType, pi, windows_shared_buffer, sizeof(windows_shared_buffer)))
+ strncpyz(windows_shared_buffer, "[unknown]", sizeof(windows_shared_buffer) - 1);
+
+ // We are not plotting _Total here, because the cloud will group the sites
+ if (strcasecmp(windows_shared_buffer, "_Total") == 0) {
+ continue;
+ }
+
+ struct web_service *p = dictionary_set(web_services, windows_shared_buffer, NULL, sizeof(*p));
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISReceivedBytesTotal) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISSentBytesTotal)) {
+ if (!p->st_traffic) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_traffic", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_traffic = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "traffic",
+ "iis.website_traffic",
+ "Website traffic",
+ "bytes/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_TRAFFIC,
+ update_every,
+ RRDSET_TYPE_AREA);
+
+ p->rd_traffic_received = rrddim_add(p->st_traffic, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_traffic_sent = rrddim_add(p->st_traffic, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_traffic->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_traffic, p->rd_traffic_received, (collected_number)p->IISReceivedBytesTotal.current.Data);
+ rrddim_set_by_pointer(
+ p->st_traffic, p->rd_traffic_sent, (collected_number)p->IISSentBytesTotal.current.Data);
+
+ rrdset_done(p->st_traffic);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISFilesReceivedTotal) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISFilesSentTotal)) {
+ if (!p->st_file_transfer) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_ftp_file_transfer_rate", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_file_transfer = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "traffic",
+ "iis.website_ftp_file_transfer_rate",
+ "Website FTP file transfer rate",
+ "files/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_FTP_FILE_TRANSFER_RATE,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_files_received = rrddim_add(p->st_file_transfer, "received", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_files_sent = rrddim_add(p->st_file_transfer, "sent", NULL, -1, 1, RRD_ALGORITHM_INCREMENTAL);
+ rrdlabels_add(p->st_file_transfer->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_file_transfer, p->rd_files_received, (collected_number)p->IISFilesReceivedTotal.current.Data);
+ rrddim_set_by_pointer(
+ p->st_file_transfer, p->rd_files_sent, (collected_number)p->IISFilesSentTotal.current.Data);
+
+ rrdset_done(p->st_file_transfer);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISCurrentConnections)) {
+ if (!p->st_curr_connections) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_active_connections_count", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_curr_connections = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "connections",
+ "iis.website_active_connections_count",
+ "Website active connections",
+ "connections",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService1",
+ PRIO_WEBSITE_IIS_ACTIVE_CONNECTIONS_COUNT,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_curr_connections = rrddim_add(p->st_curr_connections, "active", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ rrdlabels_add(p->st_curr_connections->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_curr_connections,
+ p->rd_curr_connections,
+ (collected_number)p->IISCurrentConnections.current.Data);
+
+ rrdset_done(p->st_curr_connections);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISConnAttemptsAllInstancesTotal)) {
+ if (!p->st_connections_attemps) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_connection_attempts_rate", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_connections_attemps = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "connections",
+ "iis.website_connection_attempts_rate",
+ "Website connections attempts",
+ "attempts/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_CONNECTIONS_ATTEMP,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_connections_attemps =
+ rrddim_add(p->st_connections_attemps, "connection", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_connections_attemps->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_connections_attemps,
+ p->rd_connections_attemps,
+ (collected_number)p->IISConnAttemptsAllInstancesTotal.current.Data);
+
+ rrdset_done(p->st_connections_attemps);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISCurrentAnonymousUser) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISCurrentNonAnonymousUsers)) {
+ if (!p->st_user_count) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_users_count", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_user_count = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "requests",
+ "iis.website_users_count",
+ "Website users with pending requests",
+ "users",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_USERS,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ p->rd_user_anonymous = rrddim_add(p->st_user_count, "anonymous", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+ p->rd_user_nonanonymous =
+ rrddim_add(p->st_user_count, "non_anonymous", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_user_count->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_user_count, p->rd_user_anonymous, (collected_number)p->IISCurrentAnonymousUser.current.Data);
+
+ rrddim_set_by_pointer(
+ p->st_user_count,
+ p->rd_user_nonanonymous,
+ (collected_number)p->IISCurrentNonAnonymousUsers.current.Data);
+
+ rrdset_done(p->st_user_count);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISCurrentISAPIExtRequests)) {
+ if (!p->st_isapi_extension_request_count) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_isapi_extension_requests_count", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_isapi_extension_request_count = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "requests",
+ "iis.website_isapi_extension_requests_count",
+ "ISAPI extension requests",
+ "requests",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_ISAPI_EXT_REQUEST_COUNT,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_isapi_extension_request_count =
+ rrddim_add(p->st_isapi_extension_request_count, "isapi", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(
+ p->st_isapi_extension_request_count->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_isapi_extension_request_count,
+ p->rd_isapi_extension_request_count,
+ (collected_number)p->IISCurrentISAPIExtRequests.current.Data);
+
+ rrdset_done(p->st_isapi_extension_request_count);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISIPAPIExtRequestsTotal)) {
+ if (!p->st_isapi_extension_request_rate) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_isapi_extension_requests_rate", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_isapi_extension_request_rate = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "requests",
+ "iis.website_isapi_extension_requests_rate",
+ "Website extensions request",
+ "requests/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_ISAPI_EXT_REQUEST_RATE,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_isapi_extension_request_rate =
+ rrddim_add(p->st_isapi_extension_request_rate, "isapi", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(
+ p->st_isapi_extension_request_rate->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_isapi_extension_request_rate,
+ p->rd_isapi_extension_request_rate,
+ (collected_number)p->IISIPAPIExtRequestsTotal.current.Data);
+
+ rrdset_done(p->st_isapi_extension_request_rate);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISLockedErrorsTotal) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISNotFoundErrorsTotal)) {
+ if (!p->st_error_rate) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_errors_rate", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_error_rate = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "requests",
+ "iis.website_errors_rate",
+ "Website errors",
+ "errors/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_USERS,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ p->rd_error_rate_locked =
+ rrddim_add(p->st_error_rate, "document_locked", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_error_rate_not_found =
+ rrddim_add(p->st_error_rate, "document_not_found", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_error_rate->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_error_rate, p->rd_error_rate_locked, (collected_number)p->IISLockedErrorsTotal.current.Data);
+
+ rrddim_set_by_pointer(
+ p->st_error_rate, p->rd_error_rate_not_found, (collected_number)p->IISNotFoundErrorsTotal.current.Data);
+
+ rrdset_done(p->st_error_rate);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISLogonAttemptsTotal)) {
+ if (!p->st_logon_attemps) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_logon_attempts_rate", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_logon_attemps = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "logon",
+ "iis.website_logon_attempts_rate",
+ "Website logon attempts",
+ "attempts/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_LOGON_ATTEMPTS,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_logon_attemps = rrddim_add(p->st_logon_attemps, "logon", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_logon_attemps->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_logon_attemps, p->rd_logon_attemps, (collected_number)p->IISLogonAttemptsTotal.current.Data);
+
+ rrdset_done(p->st_logon_attemps);
+ }
+
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISUptime)) {
+ if (!p->st_service_uptime) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_uptime", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_service_uptime = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "uptime",
+ "iis.website_uptime",
+ "Website uptime",
+ "seconds",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_UPTIME,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_service_uptime = rrddim_add(p->st_service_uptime, "uptime", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+
+ rrdlabels_add(p->st_service_uptime->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_service_uptime, p->rd_service_uptime, (collected_number)p->IISUptime.current.Data);
+
+ rrdset_done(p->st_service_uptime);
+ }
+
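+ // the per-method request charts are only updated when every per-method counter is present in this data block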
+ if (perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsOptions) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsGet) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsPost) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsHead) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsPut) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsDelete) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsTrace) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsMove) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsCopy) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsMkcol) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsPropfind) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsProppatch) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsSearch) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsLock) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsUnlock) &&
+ perflibGetInstanceCounter(pDataBlock, pObjectType, pi, &p->IISRequestsOther)) {
+ if (!p->st_request_rate) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_requests_rate", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_request_rate = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "requests",
+ "iis.website_requests_rate",
+ "Website requests rate",
+ "requests/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_REQUESTS_RATE,
+ update_every,
+ RRDSET_TYPE_LINE);
+
+ p->rd_request_rate = rrddim_add(p->st_request_rate, "requests", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_request_rate->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
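+ // total request rate is the sum of the per-method counters; the dimension is incremental, so the per-second rate is derived from consecutive samples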
+ uint64_t requests =
+ p->IISRequestsOptions.current.Data + p->IISRequestsGet.current.Data + p->IISRequestsPost.current.Data +
+ p->IISRequestsHead.current.Data + p->IISRequestsPut.current.Data + p->IISRequestsDelete.current.Data +
+ p->IISRequestsTrace.current.Data + p->IISRequestsMove.current.Data + p->IISRequestsCopy.current.Data +
+ p->IISRequestsMkcol.current.Data + p->IISRequestsPropfind.current.Data +
+ p->IISRequestsProppatch.current.Data + p->IISRequestsSearch.current.Data +
+ p->IISRequestsLock.current.Data + p->IISRequestsUnlock.current.Data + p->IISRequestsOther.current.Data;
+
+ rrddim_set_by_pointer(p->st_request_rate, p->rd_request_rate, (collected_number)requests);
+
+ rrdset_done(p->st_request_rate);
+
+ if (!p->st_request_by_type_rate) {
+ snprintfz(id, RRD_ID_LENGTH_MAX, "website_%s_requests_by_type_rate", windows_shared_buffer);
+ netdata_fix_chart_name(id);
+ p->st_request_by_type_rate = rrdset_create_localhost(
+ "iis",
+ id,
+ NULL,
+ "requests",
+ "iis.website_requests_by_type_rate",
+ "Website requests rate",
+ "requests/s",
+ PLUGIN_WINDOWS_NAME,
+ "PerflibWebService",
+ PRIO_WEBSITE_IIS_REQUESTS_BY_TYPE_RATE,
+ update_every,
+ RRDSET_TYPE_STACKED);
+
+ p->rd_request_options_rate = rrddim_add(p->st_request_by_type_rate, "options", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_get_rate = rrddim_add(p->st_request_by_type_rate, "get", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_post_rate = rrddim_add(p->st_request_by_type_rate, "post", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_head_rate = rrddim_add(p->st_request_by_type_rate, "head", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_put_rate = rrddim_add(p->st_request_by_type_rate, "put", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_delete_rate = rrddim_add(p->st_request_by_type_rate, "delete", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_trace_rate = rrddim_add(p->st_request_by_type_rate, "trace", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_move_rate = rrddim_add(p->st_request_by_type_rate, "move", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_copy_rate = rrddim_add(p->st_request_by_type_rate, "copy", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_mkcol_rate = rrddim_add(p->st_request_by_type_rate, "mkcol", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_propfind_rate = rrddim_add(p->st_request_by_type_rate, "propfind", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_proppatch_rate = rrddim_add(p->st_request_by_type_rate, "proppatch", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_search_rate = rrddim_add(p->st_request_by_type_rate, "search", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_lock_rate = rrddim_add(p->st_request_by_type_rate, "lock", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_unlock_rate = rrddim_add(p->st_request_by_type_rate, "unlock", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+ p->rd_request_other_rate = rrddim_add(p->st_request_by_type_rate, "other", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+ rrdlabels_add(p->st_request_by_type_rate->rrdlabels, "website", windows_shared_buffer, RRDLABEL_SRC_AUTO);
+ }
+
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_options_rate,
+ (collected_number)p->IISRequestsOptions.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_get_rate,
+ (collected_number)p->IISRequestsGet.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_post_rate,
+ (collected_number)p->IISRequestsPost.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_head_rate,
+ (collected_number)p->IISRequestsHead.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_put_rate,
+ (collected_number)p->IISRequestsPut.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_delete_rate,
+ (collected_number)p->IISRequestsDelete.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_trace_rate,
+ (collected_number)p->IISRequestsTrace.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_move_rate,
+ (collected_number)p->IISRequestsMove.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_copy_rate,
+ (collected_number)p->IISRequestsCopy.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_mkcol_rate,
+ (collected_number)p->IISRequestsMkcol.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_propfind_rate,
+ (collected_number)p->IISRequestsPropfind.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_proppatch_rate,
+ (collected_number)p->IISRequestsProppatch.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_search_rate,
+ (collected_number)p->IISRequestsSearch.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_lock_rate,
+ (collected_number)p->IISRequestsLock.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_unlock_rate,
+ (collected_number)p->IISRequestsUnlock.current.Data);
+ rrddim_set_by_pointer(
+ p->st_request_by_type_rate,
+ p->rd_request_other_rate,
+ (collected_number)p->IISRequestsOther.current.Data);
+
+ rrdset_done(p->st_request_by_type_rate);
+ }
+ }
+
+ return true;
+}
+
+int do_PerflibWebService(int update_every, usec_t dt __maybe_unused) {
+ static bool initialized = false;
+
+ if (unlikely(!initialized)) {
+ initialize();
+ initialized = true;
+ }
+
+ DWORD id = RegistryFindIDByName("Web Service");
+ if (id == PERFLIB_REGISTRY_NAME_NOT_FOUND)
+ return -1;
+
+ PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
+ if (!pDataBlock)
+ return -1;
+
+ do_web_services(pDataBlock, update_every);
+
+ return 0;
+}
diff --git a/src/collectors/windows.plugin/perflib.c b/src/collectors/windows.plugin/perflib.c
deleted file mode 100644
index 4df48acfb..000000000
--- a/src/collectors/windows.plugin/perflib.c
+++ /dev/null
@@ -1,671 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "perflib.h"
-
-// --------------------------------------------------------------------------------
-
-// Retrieve a buffer that contains the specified performance data.
-// The pwszSource parameter determines the data that GetRegistryBuffer returns.
-//
-// Typically, when calling RegQueryValueEx, you can specify zero for the size of the buffer
-// and the RegQueryValueEx will set your size variable to the required buffer size. However,
-// if the source is "Global" or one or more object index values, you will need to increment
-// the buffer size in a loop until RegQueryValueEx does not return ERROR_MORE_DATA.
-static LPBYTE getPerformanceData(const char *pwszSource) {
- static __thread DWORD size = 0;
- static __thread LPBYTE buffer = NULL;
-
- if(pwszSource == (const char *)0x01) {
- freez(buffer);
- buffer = NULL;
- size = 0;
- return NULL;
- }
-
- if(!size) {
- size = 32 * 1024;
- buffer = mallocz(size);
- }
-
- LONG status = ERROR_SUCCESS;
- while ((status = RegQueryValueEx(HKEY_PERFORMANCE_DATA, pwszSource,
- NULL, NULL, buffer, &size)) == ERROR_MORE_DATA) {
- size *= 2;
- buffer = reallocz(buffer, size);
- }
-
- if (status != ERROR_SUCCESS) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR, "RegQueryValueEx failed with 0x%x.\n", status);
- return NULL;
- }
-
- return buffer;
-}
-
-void perflibFreePerformanceData(void) {
- getPerformanceData((const char *)0x01);
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-
-// Retrieve the raw counter value and any supporting data needed to calculate
-// a displayable counter value. Use the counter type to determine the information
-// needed to calculate the value.
-
-static BOOL getCounterData(
- PERF_DATA_BLOCK *pDataBlock,
- PERF_OBJECT_TYPE* pObject,
- PERF_COUNTER_DEFINITION* pCounter,
- PERF_COUNTER_BLOCK* pCounterDataBlock,
- PRAW_DATA pRawData)
-{
- PVOID pData = NULL;
- UNALIGNED ULONGLONG* pullData = NULL;
- PERF_COUNTER_DEFINITION* pBaseCounter = NULL;
- BOOL fSuccess = TRUE;
-
- //Point to the raw counter data.
- pData = (PVOID)((LPBYTE)pCounterDataBlock + pCounter->CounterOffset);
-
- //Now use the PERF_COUNTER_DEFINITION.CounterType value to figure out what
- //other information you need to calculate a displayable value.
- switch (pCounter->CounterType) {
-
- case PERF_COUNTER_COUNTER:
- case PERF_COUNTER_QUEUELEN_TYPE:
- case PERF_SAMPLE_COUNTER:
- pRawData->Data = (ULONGLONG)(*(DWORD*)pData);
- pRawData->Time = pDataBlock->PerfTime.QuadPart;
- if (PERF_COUNTER_COUNTER == pCounter->CounterType || PERF_SAMPLE_COUNTER == pCounter->CounterType)
- pRawData->Frequency = pDataBlock->PerfFreq.QuadPart;
- break;
-
- case PERF_OBJ_TIME_TIMER:
- pRawData->Data = (ULONGLONG)(*(DWORD*)pData);
- pRawData->Time = pObject->PerfTime.QuadPart;
- break;
-
- case PERF_COUNTER_100NS_QUEUELEN_TYPE:
- pRawData->Data = *(UNALIGNED ULONGLONG *)pData;
- pRawData->Time = pDataBlock->PerfTime100nSec.QuadPart;
- break;
-
- case PERF_COUNTER_OBJ_TIME_QUEUELEN_TYPE:
- pRawData->Data = *(UNALIGNED ULONGLONG *)pData;
- pRawData->Time = pObject->PerfTime.QuadPart;
- break;
-
- case PERF_COUNTER_TIMER:
- case PERF_COUNTER_TIMER_INV:
- case PERF_COUNTER_BULK_COUNT:
- case PERF_COUNTER_LARGE_QUEUELEN_TYPE:
- pullData = (UNALIGNED ULONGLONG *)pData;
- pRawData->Data = *pullData;
- pRawData->Time = pDataBlock->PerfTime.QuadPart;
- if (pCounter->CounterType == PERF_COUNTER_BULK_COUNT)
- pRawData->Frequency = pDataBlock->PerfFreq.QuadPart;
- break;
-
- case PERF_COUNTER_MULTI_TIMER:
- case PERF_COUNTER_MULTI_TIMER_INV:
- pullData = (UNALIGNED ULONGLONG *)pData;
- pRawData->Data = *pullData;
- pRawData->Frequency = pDataBlock->PerfFreq.QuadPart;
- pRawData->Time = pDataBlock->PerfTime.QuadPart;
-
- //These counter types have a second counter value that is adjacent to
- //this counter value in the counter data block. The value is needed for
- //the calculation.
- if ((pCounter->CounterType & PERF_MULTI_COUNTER) == PERF_MULTI_COUNTER) {
- ++pullData;
- pRawData->MultiCounterData = *(DWORD*)pullData;
- }
- break;
-
- //These counters do not use any time reference.
- case PERF_COUNTER_RAWCOUNT:
- case PERF_COUNTER_RAWCOUNT_HEX:
- case PERF_COUNTER_DELTA:
- // some counters in these categories, have CounterSize = sizeof(ULONGLONG)
- // but the official documentation always uses them as sizeof(DWORD)
- pRawData->Data = (ULONGLONG)(*(DWORD*)pData);
- pRawData->Time = 0;
- break;
-
- case PERF_COUNTER_LARGE_RAWCOUNT:
- case PERF_COUNTER_LARGE_RAWCOUNT_HEX:
- case PERF_COUNTER_LARGE_DELTA:
- pRawData->Data = *(UNALIGNED ULONGLONG*)pData;
- pRawData->Time = 0;
- break;
-
- //These counters use the 100ns time base in their calculation.
- case PERF_100NSEC_TIMER:
- case PERF_100NSEC_TIMER_INV:
- case PERF_100NSEC_MULTI_TIMER:
- case PERF_100NSEC_MULTI_TIMER_INV:
- pullData = (UNALIGNED ULONGLONG*)pData;
- pRawData->Data = *pullData;
- pRawData->Time = pDataBlock->PerfTime100nSec.QuadPart;
-
- //These counter types have a second counter value that is adjacent to
- //this counter value in the counter data block. The value is needed for
- //the calculation.
- if ((pCounter->CounterType & PERF_MULTI_COUNTER) == PERF_MULTI_COUNTER) {
- ++pullData;
- pRawData->MultiCounterData = *(DWORD*)pullData;
- }
- break;
-
- //These counters use two data points, this value and one from this counter's
- //base counter. The base counter should be the next counter in the object's
- //list of counters.
- case PERF_SAMPLE_FRACTION:
- case PERF_RAW_FRACTION:
- pRawData->Data = (ULONGLONG)(*(DWORD*)pData);
- pBaseCounter = pCounter + 1; //Get base counter
- if ((pBaseCounter->CounterType & PERF_COUNTER_BASE) == PERF_COUNTER_BASE) {
- pData = (PVOID)((LPBYTE)pCounterDataBlock + pBaseCounter->CounterOffset);
- pRawData->Time = (LONGLONG)(*(DWORD*)pData);
- }
- else
- fSuccess = FALSE;
- break;
-
- case PERF_LARGE_RAW_FRACTION:
- case PERF_PRECISION_SYSTEM_TIMER:
- case PERF_PRECISION_100NS_TIMER:
- case PERF_PRECISION_OBJECT_TIMER:
- pRawData->Data = *(UNALIGNED ULONGLONG*)pData;
- pBaseCounter = pCounter + 1;
- if ((pBaseCounter->CounterType & PERF_COUNTER_BASE) == PERF_COUNTER_BASE) {
- pData = (PVOID)((LPBYTE)pCounterDataBlock + pBaseCounter->CounterOffset);
- pRawData->Time = *(LONGLONG*)pData;
- }
- else
- fSuccess = FALSE;
- break;
-
- case PERF_AVERAGE_TIMER:
- case PERF_AVERAGE_BULK:
- pRawData->Data = *(UNALIGNED ULONGLONG*)pData;
- pBaseCounter = pCounter+1;
- if ((pBaseCounter->CounterType & PERF_COUNTER_BASE) == PERF_COUNTER_BASE) {
- pData = (PVOID)((LPBYTE)pCounterDataBlock + pBaseCounter->CounterOffset);
- pRawData->Time = *(DWORD*)pData;
- }
- else
- fSuccess = FALSE;
-
- if (pCounter->CounterType == PERF_AVERAGE_TIMER)
- pRawData->Frequency = pDataBlock->PerfFreq.QuadPart;
- break;
-
- //These are base counters and are used in calculations for other counters.
- //This case should never be entered.
- case PERF_SAMPLE_BASE:
- case PERF_AVERAGE_BASE:
- case PERF_COUNTER_MULTI_BASE:
- case PERF_RAW_BASE:
- case PERF_LARGE_RAW_BASE:
- pRawData->Data = 0;
- pRawData->Time = 0;
- fSuccess = FALSE;
- break;
-
- case PERF_ELAPSED_TIME:
- pRawData->Data = *(UNALIGNED ULONGLONG*)pData;
- pRawData->Time = pObject->PerfTime.QuadPart;
- pRawData->Frequency = pObject->PerfFreq.QuadPart;
- break;
-
- //These counters are currently not supported.
- case PERF_COUNTER_TEXT:
- case PERF_COUNTER_NODATA:
- case PERF_COUNTER_HISTOGRAM_TYPE:
- default: // unknown counter types
- pRawData->Data = 0;
- pRawData->Time = 0;
- fSuccess = FALSE;
- break;
- }
-
- return fSuccess;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-
-static inline BOOL isValidPointer(PERF_DATA_BLOCK *pDataBlock __maybe_unused, void *ptr __maybe_unused) {
-#ifdef NETDATA_INTERNAL_CHECKS
- return (PBYTE)ptr >= (PBYTE)pDataBlock + pDataBlock->TotalByteLength ? FALSE : TRUE;
-#else
- return TRUE;
-#endif
-}
-
-static inline BOOL isValidStructure(PERF_DATA_BLOCK *pDataBlock __maybe_unused, void *ptr __maybe_unused, size_t length __maybe_unused) {
-#ifdef NETDATA_INTERNAL_CHECKS
- return (PBYTE)ptr + length > (PBYTE)pDataBlock + pDataBlock->TotalByteLength ? FALSE : TRUE;
-#else
- return TRUE;
-#endif
-}
-
-static inline PERF_DATA_BLOCK *getDataBlock(BYTE *pBuffer) {
- PERF_DATA_BLOCK *pDataBlock = (PERF_DATA_BLOCK *)pBuffer;
-
- static WCHAR signature[] = { 'P', 'E', 'R', 'F' };
-
- if(memcmp(pDataBlock->Signature, signature, sizeof(signature)) != 0) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Invalid data block signature.");
- return NULL;
- }
-
- if(!isValidPointer(pDataBlock, (PBYTE)pDataBlock + pDataBlock->SystemNameOffset) ||
- !isValidStructure(pDataBlock, (PBYTE)pDataBlock + pDataBlock->SystemNameOffset, pDataBlock->SystemNameLength)) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Invalid system name array.");
- return NULL;
- }
-
- return pDataBlock;
-}
-
-static inline PERF_OBJECT_TYPE *getObjectType(PERF_DATA_BLOCK* pDataBlock, PERF_OBJECT_TYPE *lastObjectType) {
- PERF_OBJECT_TYPE* pObjectType = NULL;
-
- if(!lastObjectType)
- pObjectType = (PERF_OBJECT_TYPE *)((PBYTE)pDataBlock + pDataBlock->HeaderLength);
- else if (lastObjectType->TotalByteLength != 0)
- pObjectType = (PERF_OBJECT_TYPE *)((PBYTE)lastObjectType + lastObjectType->TotalByteLength);
-
- if(pObjectType && (!isValidPointer(pDataBlock, pObjectType) || !isValidStructure(pDataBlock, pObjectType, pObjectType->TotalByteLength))) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Invalid ObjectType!");
- pObjectType = NULL;
- }
-
- return pObjectType;
-}
-
-inline PERF_OBJECT_TYPE *getObjectTypeByIndex(PERF_DATA_BLOCK *pDataBlock, DWORD ObjectNameTitleIndex) {
- PERF_OBJECT_TYPE *po = NULL;
- for(DWORD o = 0; o < pDataBlock->NumObjectTypes ; o++) {
- po = getObjectType(pDataBlock, po);
- if(po->ObjectNameTitleIndex == ObjectNameTitleIndex)
- return po;
- }
-
- return NULL;
-}
-
-static inline PERF_INSTANCE_DEFINITION *getInstance(
- PERF_DATA_BLOCK *pDataBlock,
- PERF_OBJECT_TYPE *pObjectType,
- PERF_COUNTER_BLOCK *lastCounterBlock
-) {
- PERF_INSTANCE_DEFINITION *pInstance;
-
- if(!lastCounterBlock)
- pInstance = (PERF_INSTANCE_DEFINITION *)((PBYTE)pObjectType + pObjectType->DefinitionLength);
- else
- pInstance = (PERF_INSTANCE_DEFINITION *)((PBYTE)lastCounterBlock + lastCounterBlock->ByteLength);
-
- if(pInstance && (!isValidPointer(pDataBlock, pInstance) || !isValidStructure(pDataBlock, pInstance, pInstance->ByteLength))) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Invalid Instance Definition!");
- pInstance = NULL;
- }
-
- return pInstance;
-}
-
-static inline PERF_COUNTER_BLOCK *getObjectTypeCounterBlock(
- PERF_DATA_BLOCK *pDataBlock,
- PERF_OBJECT_TYPE *pObjectType
-) {
- PERF_COUNTER_BLOCK *pCounterBlock = (PERF_COUNTER_BLOCK *)((PBYTE)pObjectType + pObjectType->DefinitionLength);
-
- if(pCounterBlock && (!isValidPointer(pDataBlock, pCounterBlock) || !isValidStructure(pDataBlock, pCounterBlock, pCounterBlock->ByteLength))) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Invalid ObjectType CounterBlock!");
- pCounterBlock = NULL;
- }
-
- return pCounterBlock;
-}
-
-static inline PERF_COUNTER_BLOCK *getInstanceCounterBlock(
- PERF_DATA_BLOCK *pDataBlock,
- PERF_OBJECT_TYPE *pObjectType,
- PERF_INSTANCE_DEFINITION *pInstance
-) {
- (void)pObjectType;
- PERF_COUNTER_BLOCK *pCounterBlock = (PERF_COUNTER_BLOCK *)((PBYTE)pInstance + pInstance->ByteLength);
-
- if(pCounterBlock && (!isValidPointer(pDataBlock, pCounterBlock) || !isValidStructure(pDataBlock, pCounterBlock, pCounterBlock->ByteLength))) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Invalid Instance CounterBlock!");
- pCounterBlock = NULL;
- }
-
- return pCounterBlock;
-}
-
-inline PERF_INSTANCE_DEFINITION *getInstanceByPosition(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, DWORD instancePosition) {
- PERF_INSTANCE_DEFINITION *pi = NULL;
- PERF_COUNTER_BLOCK *pc = NULL;
- for(DWORD i = 0; i <= instancePosition ;i++) {
- pi = getInstance(pDataBlock, pObjectType, pc);
- pc = getInstanceCounterBlock(pDataBlock, pObjectType, pi);
- }
- return pi;
-}
-
-static inline PERF_COUNTER_DEFINITION *getCounterDefinition(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_COUNTER_DEFINITION *lastCounterDefinition) {
- PERF_COUNTER_DEFINITION *pCounterDefinition = NULL;
-
- if(!lastCounterDefinition)
- pCounterDefinition = (PERF_COUNTER_DEFINITION *)((PBYTE)pObjectType + pObjectType->HeaderLength);
- else
- pCounterDefinition = (PERF_COUNTER_DEFINITION *)((PBYTE)lastCounterDefinition + lastCounterDefinition->ByteLength);
-
- if(pCounterDefinition && (!isValidPointer(pDataBlock, pCounterDefinition) || !isValidStructure(pDataBlock, pCounterDefinition, pCounterDefinition->ByteLength))) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Invalid Counter Definition!");
- pCounterDefinition = NULL;
- }
-
- return pCounterDefinition;
-}
-
-// --------------------------------------------------------------------------------------------------------------------
-
-static inline BOOL getEncodedStringToUTF8(char *dst, size_t dst_len, DWORD CodePage, char *start, DWORD length) {
- WCHAR *tempBuffer; // Temporary buffer for Unicode data
- DWORD charsCopied = 0;
- BOOL free_tempBuffer;
-
- if (CodePage == 0) {
- // Input is already Unicode (UTF-16)
- tempBuffer = (WCHAR *)start;
- charsCopied = length / sizeof(WCHAR); // Convert byte length to number of WCHARs
- free_tempBuffer = FALSE;
- }
- else {
- // Convert the multi-byte instance name to Unicode (UTF-16)
- // Calculate maximum possible characters in UTF-16
-
- int charCount = MultiByteToWideChar(CodePage, 0, start, (int)length, NULL, 0);
- tempBuffer = (WCHAR *)malloc(charCount * sizeof(WCHAR));
- if (!tempBuffer) return FALSE;
-
- charsCopied = MultiByteToWideChar(CodePage, 0, start, (int)length, tempBuffer, charCount);
- if (charsCopied == 0) {
- free(tempBuffer);
- dst[0] = '\0';
- return FALSE;
- }
-
- free_tempBuffer = TRUE;
- }
-
- // Now convert from Unicode (UTF-16) to UTF-8
- int bytesCopied = WideCharToMultiByte(CP_UTF8, 0, tempBuffer, (int)charsCopied, dst, (int)dst_len, NULL, NULL);
- if (bytesCopied == 0) {
- if (free_tempBuffer) free(tempBuffer);
- dst[0] = '\0'; // Ensure the buffer is null-terminated even on failure
- return FALSE;
- }
-
- dst[bytesCopied] = '\0'; // Ensure buffer is null-terminated
- if (free_tempBuffer) free(tempBuffer); // Free temporary buffer if used
- return TRUE;
-}
-
-inline BOOL getInstanceName(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance,
- char *buffer, size_t bufferLen) {
- (void)pDataBlock;
- if (!pInstance || !buffer || !bufferLen) return FALSE;
-
- return getEncodedStringToUTF8(buffer, bufferLen, pObjectType->CodePage,
- ((char *)pInstance + pInstance->NameOffset), pInstance->NameLength);
-}
-
-inline BOOL getSystemName(PERF_DATA_BLOCK *pDataBlock, char *buffer, size_t bufferLen) {
- return getEncodedStringToUTF8(buffer, bufferLen, 0,
- ((char *)pDataBlock + pDataBlock->SystemNameOffset), pDataBlock->SystemNameLength);
-}
-
-inline bool ObjectTypeHasInstances(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType) {
- (void)pDataBlock;
- return pObjectType->NumInstances != PERF_NO_INSTANCES && pObjectType->NumInstances > 0;
-}
-
-PERF_OBJECT_TYPE *perflibFindObjectTypeByName(PERF_DATA_BLOCK *pDataBlock, const char *name) {
- PERF_OBJECT_TYPE* pObjectType = NULL;
- for(DWORD o = 0; o < pDataBlock->NumObjectTypes; o++) {
- pObjectType = getObjectType(pDataBlock, pObjectType);
- if(strcmp(name, RegistryFindNameByID(pObjectType->ObjectNameTitleIndex)) == 0)
- return pObjectType;
- }
-
- return NULL;
-}
-
-PERF_INSTANCE_DEFINITION *perflibForEachInstance(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *lastInstance) {
- if(!ObjectTypeHasInstances(pDataBlock, pObjectType))
- return NULL;
-
- return getInstance(pDataBlock, pObjectType,
- lastInstance ?
- getInstanceCounterBlock(pDataBlock, pObjectType, lastInstance) :
- NULL );
-}
-
-bool perflibGetInstanceCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, COUNTER_DATA *cd) {
- PERF_COUNTER_DEFINITION *pCounterDefinition = NULL;
- for(DWORD c = 0; c < pObjectType->NumCounters ;c++) {
- pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition);
- if(!pCounterDefinition) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)",
- c, pObjectType->NumCounters);
- break;
- }
-
- if(cd->id) {
- if(cd->id != pCounterDefinition->CounterNameTitleIndex)
- continue;
- }
- else {
- if(strcmp(RegistryFindNameByID(pCounterDefinition->CounterNameTitleIndex), cd->key) != 0)
- continue;
-
- cd->id = pCounterDefinition->CounterNameTitleIndex;
- }
-
- cd->current.CounterType = cd->OverwriteCounterType ? cd->OverwriteCounterType : pCounterDefinition->CounterType;
- PERF_COUNTER_BLOCK *pCounterBlock = getInstanceCounterBlock(pDataBlock, pObjectType, pInstance);
-
- cd->previous = cd->current;
- cd->updated = getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &cd->current);
- return cd->updated;
- }
-
- cd->previous = cd->current;
- cd->current = RAW_DATA_EMPTY;
- cd->updated = false;
- return false;
-}
-
-bool perflibGetObjectCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, COUNTER_DATA *cd) {
- PERF_COUNTER_DEFINITION *pCounterDefinition = NULL;
- for(DWORD c = 0; c < pObjectType->NumCounters ;c++) {
- pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition);
- if(!pCounterDefinition) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)",
- c, pObjectType->NumCounters);
- break;
- }
-
- if(cd->id) {
- if(cd->id != pCounterDefinition->CounterNameTitleIndex)
- continue;
- }
- else {
- if(strcmp(RegistryFindNameByID(pCounterDefinition->CounterNameTitleIndex), cd->key) != 0)
- continue;
-
- cd->id = pCounterDefinition->CounterNameTitleIndex;
- }
-
- cd->current.CounterType = cd->OverwriteCounterType ? cd->OverwriteCounterType : pCounterDefinition->CounterType;
- PERF_COUNTER_BLOCK *pCounterBlock = getObjectTypeCounterBlock(pDataBlock, pObjectType);
-
- cd->previous = cd->current;
- cd->updated = getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &cd->current);
- return cd->updated;
- }
-
- cd->previous = cd->current;
- cd->current = RAW_DATA_EMPTY;
- cd->updated = false;
- return false;
-}
-
-PERF_DATA_BLOCK *perflibGetPerformanceData(DWORD id) {
- char source[24];
- snprintfz(source, sizeof(source), "%u", id);
-
- LPBYTE pData = (LPBYTE)getPerformanceData((id > 0) ? source : NULL);
- if (!pData) return NULL;
-
- PERF_DATA_BLOCK *pDataBlock = getDataBlock(pData);
- if(!pDataBlock) return NULL;
-
- return pDataBlock;
-}
-
-int perflibQueryAndTraverse(DWORD id,
- perflib_data_cb dataCb,
- perflib_object_cb objectCb,
- perflib_instance_cb instanceCb,
- perflib_instance_counter_cb instanceCounterCb,
- perflib_counter_cb counterCb,
- void *data) {
- int counters = -1;
-
- PERF_DATA_BLOCK *pDataBlock = perflibGetPerformanceData(id);
- if(!pDataBlock) goto cleanup;
-
- bool do_data = true;
- if(dataCb)
- do_data = dataCb(pDataBlock, data);
-
- PERF_OBJECT_TYPE* pObjectType = NULL;
- for(DWORD o = 0; do_data && o < pDataBlock->NumObjectTypes; o++) {
- pObjectType = getObjectType(pDataBlock, pObjectType);
- if(!pObjectType) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Cannot read object type No %d (out of %d)",
- o, pDataBlock->NumObjectTypes);
- break;
- }
-
- bool do_object = true;
- if(objectCb)
- do_object = objectCb(pDataBlock, pObjectType, data);
-
- if(!do_object)
- continue;
-
- if(ObjectTypeHasInstances(pDataBlock, pObjectType)) {
- PERF_INSTANCE_DEFINITION *pInstance = NULL;
- PERF_COUNTER_BLOCK *pCounterBlock = NULL;
- for(LONG i = 0; i < pObjectType->NumInstances ;i++) {
- pInstance = getInstance(pDataBlock, pObjectType, pCounterBlock);
- if(!pInstance) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Cannot read Instance No %d (out of %d)",
- i, pObjectType->NumInstances);
- break;
- }
-
- pCounterBlock = getInstanceCounterBlock(pDataBlock, pObjectType, pInstance);
- if(!pCounterBlock) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Cannot read CounterBlock of instance No %d (out of %d)",
- i, pObjectType->NumInstances);
- break;
- }
-
- bool do_instance = true;
- if(instanceCb)
- do_instance = instanceCb(pDataBlock, pObjectType, pInstance, data);
-
- if(!do_instance)
- continue;
-
- PERF_COUNTER_DEFINITION *pCounterDefinition = NULL;
- for(DWORD c = 0; c < pObjectType->NumCounters ;c++) {
- pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition);
- if(!pCounterDefinition) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)",
- c, pObjectType->NumCounters);
- break;
- }
-
- RAW_DATA sample = {
- .CounterType = pCounterDefinition->CounterType,
- };
- if(getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &sample)) {
- // DisplayCalculatedValue(&sample, &sample);
-
- if(instanceCounterCb) {
- instanceCounterCb(pDataBlock, pObjectType, pInstance, pCounterDefinition, &sample, data);
- counters++;
- }
- }
- }
-
- if(instanceCb)
- instanceCb(pDataBlock, pObjectType, NULL, data);
- }
- }
- else {
- PERF_COUNTER_BLOCK *pCounterBlock = getObjectTypeCounterBlock(pDataBlock, pObjectType);
- PERF_COUNTER_DEFINITION *pCounterDefinition = NULL;
- for(DWORD c = 0; c < pObjectType->NumCounters ;c++) {
- pCounterDefinition = getCounterDefinition(pDataBlock, pObjectType, pCounterDefinition);
- if(!pCounterDefinition) {
- nd_log(NDLS_COLLECTORS, NDLP_ERR,
- "WINDOWS: PERFLIB: Cannot read counter definition No %u (out of %u)",
- c, pObjectType->NumCounters);
- break;
- }
-
- RAW_DATA sample = {
- .CounterType = pCounterDefinition->CounterType,
- };
- if(getCounterData(pDataBlock, pObjectType, pCounterDefinition, pCounterBlock, &sample)) {
- // DisplayCalculatedValue(&sample, &sample);
-
- if(counterCb) {
- counterCb(pDataBlock, pObjectType, pCounterDefinition, &sample, data);
- counters++;
- }
- }
- }
- }
-
- if(objectCb)
- objectCb(pDataBlock, NULL, data);
- }
-
-cleanup:
- return counters;
-}
diff --git a/src/collectors/windows.plugin/perflib.h b/src/collectors/windows.plugin/perflib.h
deleted file mode 100644
index deba4e9a3..000000000
--- a/src/collectors/windows.plugin/perflib.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_PERFLIB_H
-#define NETDATA_PERFLIB_H
-
-#include "libnetdata/libnetdata.h"
-#include <windows.h>
-
-const char *RegistryFindNameByID(DWORD id);
-const char *RegistryFindHelpByID(DWORD id);
-DWORD RegistryFindIDByName(const char *name);
-#define PERFLIB_REGISTRY_NAME_NOT_FOUND (DWORD)-1
-
-PERF_DATA_BLOCK *perflibGetPerformanceData(DWORD id);
-void perflibFreePerformanceData(void);
-PERF_OBJECT_TYPE *perflibFindObjectTypeByName(PERF_DATA_BLOCK *pDataBlock, const char *name);
-PERF_INSTANCE_DEFINITION *perflibForEachInstance(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *lastInstance);
-
-typedef struct _rawdata {
- DWORD CounterType;
- DWORD MultiCounterData; // Second raw counter value for multi-valued counters
- ULONGLONG Data; // Raw counter data
- LONGLONG Time; // Is a time value or a base value
- LONGLONG Frequency;
-} RAW_DATA, *PRAW_DATA;
-
-typedef struct _counterdata {
- DWORD id;
- bool updated;
- const char *key;
- DWORD OverwriteCounterType; // if set, the counter type will be overwritten once read
- RAW_DATA current;
- RAW_DATA previous;
-} COUNTER_DATA;
-
-#define RAW_DATA_EMPTY (RAW_DATA){ 0 }
-
-bool perflibGetInstanceCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, COUNTER_DATA *cd);
-bool perflibGetObjectCounter(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, COUNTER_DATA *cd);
-
-typedef bool (*perflib_data_cb)(PERF_DATA_BLOCK *pDataBlock, void *data);
-typedef bool (*perflib_object_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, void *data);
-typedef bool (*perflib_instance_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, void *data);
-typedef bool (*perflib_instance_counter_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data);
-typedef bool (*perflib_counter_cb)(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_COUNTER_DEFINITION *pCounter, RAW_DATA *sample, void *data);
-
-int perflibQueryAndTraverse(DWORD id,
- perflib_data_cb dataCb,
- perflib_object_cb objectCb,
- perflib_instance_cb instanceCb,
- perflib_instance_counter_cb instanceCounterCb,
- perflib_counter_cb counterCb,
- void *data);
-
-bool ObjectTypeHasInstances(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType);
-
-BOOL getInstanceName(PERF_DATA_BLOCK *pDataBlock, PERF_OBJECT_TYPE *pObjectType, PERF_INSTANCE_DEFINITION *pInstance,
- char *buffer, size_t bufferLen);
-
-BOOL getSystemName(PERF_DATA_BLOCK *pDataBlock, char *buffer, size_t bufferLen);
-
-PERF_OBJECT_TYPE *getObjectTypeByIndex(PERF_DATA_BLOCK *pDataBlock, DWORD ObjectNameTitleIndex);
-
-PERF_INSTANCE_DEFINITION *getInstanceByPosition(
- PERF_DATA_BLOCK *pDataBlock,
- PERF_OBJECT_TYPE *pObjectType,
- DWORD instancePosition);
-
-void PerflibNamesRegistryInitialize(void);
-void PerflibNamesRegistryUpdate(void);
-
-#endif //NETDATA_PERFLIB_H
diff --git a/src/collectors/windows.plugin/windows-internals.h b/src/collectors/windows.plugin/windows-internals.h
index 1b7cccc73..70d44b902 100644
--- a/src/collectors/windows.plugin/windows-internals.h
+++ b/src/collectors/windows.plugin/windows-internals.h
@@ -1,18 +1,17 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_WINDOWS_INTERNALS_H
-#define NETDATA_WINDOWS_INTERNALS_H
-
-#include <windows.h>
-
-static inline ULONGLONG FileTimeToULL(FILETIME ft) {
- ULARGE_INTEGER ul;
- ul.LowPart = ft.dwLowDateTime;
- ul.HighPart = ft.dwHighDateTime;
- return ul.QuadPart;
-}
-
-#include "perflib.h"
-#include "perflib-rrd.h"
-
-#endif //NETDATA_WINDOWS_INTERNALS_H
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_WINDOWS_INTERNALS_H
+#define NETDATA_WINDOWS_INTERNALS_H
+
+#include "libnetdata/libnetdata.h"
+
+static inline ULONGLONG FileTimeToULL(FILETIME ft) {
+ ULARGE_INTEGER ul;
+ ul.LowPart = ft.dwLowDateTime;
+ ul.HighPart = ft.dwHighDateTime;
+ return ul.QuadPart;
+}
+
+#include "perflib-rrd.h"
+
+#endif //NETDATA_WINDOWS_INTERNALS_H
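
As a side note, `FileTimeToULL()` above returns the raw FILETIME value, i.e. 100-nanosecond intervals since 1601-01-01. A hedged sketch (not part of this patch) of converting that into Unix time:

```c
// Illustrative helper only: divide the 100-ns count by 10^7 to get seconds,
// then subtract the 11,644,473,600-second offset between 1601 and 1970.
static inline time_t FileTimeToUnixSec(FILETIME ft) {
    return (time_t)(FileTimeToULL(ft) / 10000000ULL - 11644473600ULL);
}
```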
diff --git a/src/collectors/windows.plugin/windows_plugin.c b/src/collectors/windows.plugin/windows_plugin.c
index 35ef857be..74b72e0ce 100644
--- a/src/collectors/windows.plugin/windows_plugin.c
+++ b/src/collectors/windows.plugin/windows_plugin.c
@@ -13,18 +13,26 @@ static struct proc_module {
} win_modules[] = {
// system metrics
- {.name = "GetSystemUptime", .dim = "GetSystemUptime", .func = do_GetSystemUptime},
- {.name = "GetSystemRAM", .dim = "GetSystemRAM", .func = do_GetSystemRAM},
+ {.name = "GetSystemUptime", .dim = "GetSystemUptime", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemUptime},
+ {.name = "GetSystemRAM", .dim = "GetSystemRAM", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemRAM},
// the same is provided by PerflibProcessor, with more detailed analysis
- //{.name = "GetSystemCPU", .dim = "GetSystemCPU", .func = do_GetSystemCPU},
+ //{.name = "GetSystemCPU", .dim = "GetSystemCPU", .enabled = CONFIG_BOOLEAN_YES, .func = do_GetSystemCPU},
- {.name = "PerflibProcesses", .dim = "PerflibProcesses", .func = do_PerflibProcesses},
- {.name = "PerflibProcessor", .dim = "PerflibProcessor", .func = do_PerflibProcessor},
- {.name = "PerflibMemory", .dim = "PerflibMemory", .func = do_PerflibMemory},
- {.name = "PerflibStorage", .dim = "PerflibStorage", .func = do_PerflibStorage},
- {.name = "PerflibNetwork", .dim = "PerflibNetwork", .func = do_PerflibNetwork},
- {.name = "PerflibObjects", .dim = "PerflibObjects", .func = do_PerflibObjects},
+ {.name = "PerflibProcesses", .dim = "PerflibProcesses", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibProcesses},
+ {.name = "PerflibProcessor", .dim = "PerflibProcessor", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibProcessor},
+ {.name = "PerflibMemory", .dim = "PerflibMemory", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibMemory},
+ {.name = "PerflibStorage", .dim = "PerflibStorage", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibStorage},
+ {.name = "PerflibNetwork", .dim = "PerflibNetwork", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibNetwork},
+ {.name = "PerflibObjects", .dim = "PerflibObjects", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibObjects},
+ {.name = "PerflibHyperV", .dim = "PerflibHyperV", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibHyperV},
+
+ {.name = "PerflibThermalZone", .dim = "PerflibThermalZone", .enabled = CONFIG_BOOLEAN_NO, .func = do_PerflibThermalZone},
+
+ {.name = "PerflibWebService", .dim = "PerflibWebService", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibWebService},
+ {.name = "PerflibMSSQL", .dim = "PerflibMSSQL", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibMSSQL},
+
+ {.name = "PerflibNetFramework", .dim = "PerflibNetFramework", .enabled = CONFIG_BOOLEAN_YES, .func = do_PerflibNetFramework},
// the terminator of this array
{.name = NULL, .dim = NULL, .func = NULL}
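
The table above now carries a per-module default in `.enabled` (note that PerflibThermalZone ships disabled), and the hunk below feeds that default into `config_get_boolean()`. A hedged netdata.conf sketch of overriding these defaults, assuming the usual `[plugin:windows]` section layout:

```
[plugin:windows]
    PerflibThermalZone = yes
    PerflibMSSQL = no
```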
@@ -66,15 +74,14 @@ void *win_plugin_main(void *ptr) {
for(i = 0; win_modules[i].name; i++) {
struct proc_module *pm = &win_modules[i];
- pm->enabled = config_get_boolean("plugin:windows", pm->name, CONFIG_BOOLEAN_YES);
+ pm->enabled = config_get_boolean("plugin:windows", pm->name, pm->enabled);
pm->rd = NULL;
worker_register_job_name(i, win_modules[i].dim);
}
- usec_t step = localhost->rrd_update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC);
#define LGS_MODULE_ID 0
@@ -86,7 +93,7 @@ void *win_plugin_main(void *ptr) {
while(service_running(SERVICE_COLLECTORS)) {
worker_is_idle();
- usec_t hb_dt = heartbeat_next(&hb, step);
+ usec_t hb_dt = heartbeat_next(&hb);
if(unlikely(!service_running(SERVICE_COLLECTORS)))
break;
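
The hunks above also reflect the reworked heartbeat API: the step is now passed to `heartbeat_init()` and `heartbeat_next()` takes only the heartbeat itself. A minimal sketch of the resulting collection loop, assuming the same symbols used in this file:

```c
heartbeat_t hb;
heartbeat_init(&hb, localhost->rrd_update_every * USEC_PER_SEC);

while(service_running(SERVICE_COLLECTORS)) {
    // sleeps until the next tick and returns the microseconds that elapsed
    usec_t hb_dt = heartbeat_next(&hb);

    if(unlikely(!service_running(SERVICE_COLLECTORS)))
        break;

    // ... run each enabled module, passing hb_dt as dt ...
}
```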
diff --git a/src/collectors/windows.plugin/windows_plugin.h b/src/collectors/windows.plugin/windows_plugin.h
index 73c1ecda1..3852653ed 100644
--- a/src/collectors/windows.plugin/windows_plugin.h
+++ b/src/collectors/windows.plugin/windows_plugin.h
@@ -24,7 +24,82 @@ int do_PerflibProcesses(int update_every, usec_t dt);
int do_PerflibProcessor(int update_every, usec_t dt);
int do_PerflibMemory(int update_every, usec_t dt);
int do_PerflibObjects(int update_every, usec_t dt);
+int do_PerflibThermalZone(int update_every, usec_t dt);
+int do_PerflibWebService(int update_every, usec_t dt);
+int do_PerflibMSSQL(int update_every, usec_t dt);
+int do_PerflibNetFramework(int update_every, usec_t dt);
-#include "perflib.h"
+enum PERFLIB_PRIO {
+ PRIO_WEBSITE_IIS_REQUESTS_RATE = 21000, // PRIO selected, because APPS is using 20YYY
+ PRIO_WEBSITE_IIS_REQUESTS_BY_TYPE_RATE,
+ PRIO_WEBSITE_IIS_TRAFFIC,
+ PRIO_WEBSITE_IIS_FTP_FILE_TRANSFER_RATE,
+ PRIO_WEBSITE_IIS_ACTIVE_CONNECTIONS_COUNT,
+ PRIO_WEBSITE_IIS_CONNECTIONS_ATTEMP,
+ PRIO_WEBSITE_IIS_USERS,
+ PRIO_WEBSITE_IIS_ISAPI_EXT_REQUEST_COUNT,
+ PRIO_WEBSITE_IIS_ISAPI_EXT_REQUEST_RATE,
+ PRIO_WEBSITE_IIS_ERRORS_RATE,
+ PRIO_WEBSITE_IIS_LOGON_ATTEMPTS,
+ PRIO_WEBSITE_IIS_UPTIME,
+
+ PRIO_MSSQL_USER_CONNECTIONS,
+
+ PRIO_MSSQL_DATABASE_TRANSACTIONS,
+ PRIO_MSSQL_DATABASE_ACTIVE_TRANSACTIONS,
+ PRIO_MSSQL_DATABASE_WRITE_TRANSACTIONS,
+ PRIO_MSSQL_DATABASE_BACKUP_RESTORE_OPERATIONS,
+ PRIO_MSSQL_DATABASE_LOG_FLUSHES,
+ PRIO_MSSQL_DATABASE_LOG_FLUSHED,
+
+ PRIO_MSSQL_DATABASE_DATA_FILE_SIZE,
+
+ PRIO_MSSQL_STATS_BATCH_REQUEST,
+ PRIO_MSSQL_STATS_COMPILATIONS,
+ PRIO_MSSQL_STATS_RECOMPILATIONS,
+ PRIO_MSSQL_STATS_AUTO_PARAMETRIZATION,
+ PRIO_MSSQL_STATS_SAFE_AUTO_PARAMETRIZATION,
+
+ PRIO_MSSQL_BLOCKED_PROCESSES,
+
+ PRIO_MSSQL_BUFF_CACHE_HIT_RATIO,
+ PRIO_MSSQL_BUFF_MAN_IOPS,
+ PRIO_MSSQL_BUFF_CHECKPOINT_PAGES,
+ PRIO_MSSQL_BUFF_METHODS_PAGE_SPLIT,
+ PRIO_MSSQL_BUFF_PAGE_LIFE_EXPECTANCY,
+
+ PRIO_MSSQL_MEMMGR_CONNECTION_MEMORY_BYTES,
+ PRIO_MSSQL_MEMMGR_TOTAL_SERVER,
+ PRIO_MSSQL_MEMMGR_EXTERNAL_BENEFIT_OF_MEMORY,
+ PRIO_MSSQL_MEMMGR_PENDING_MEMORY_GRANTS,
+
+ PRIO_MSSQL_LOCKS_WAIT,
+ PRIO_MSSQL_LOCKS_DEADLOCK,
+
+ PRIO_MSSQL_SQL_ERRORS,
+
+ PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROWN,
+ PRIO_NETFRAMEWORK_CLR_EXCEPTION_FILTERS,
+ PRIO_NETFRAMEWORK_CLR_EXCEPTION_FINALLYS,
+ PRIO_NETFRAMEWORK_CLR_EXCEPTION_THROW_TO_CATCH_DEPTH,
+
+ PRIO_NETFRAMEWORK_CLR_INTEROP_CCW,
+ PRIO_NETFRAMEWORK_CLR_INTEROP_MARSHALLING,
+ PRIO_NETFRAMEWORK_CLR_INTEROP_STUBS_CREATED,
+
+ PRIO_NETFRAMEWORK_CLR_JIT_METHODS,
+ PRIO_NETFRAMEWORK_CLR_JIT_TIME,
+ PRIO_NETFRAMEWORK_CLR_JIT_STANDARD_FAILURES,
+ PRIO_NETFRAMEWORK_CLR_JIT_IL_BYTES,
+
+ PRIO_NETFRAMEWORK_CLR_LOADING_HEAP_SIZE,
+ PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_LOADED,
+ PRIO_NETFRAMEWORK_CLR_LOADING_APP_DOMAINS_UNLOADED,
+ PRIO_NETFRAMEWORK_CLR_LOADING_ASSEMBLIES_LOADED,
+ PRIO_NETFRAMEWORK_CLR_LOADING_CLASSES_LOADED,
+ PRIO_NETFRAMEWORK_CLR_LOADING_CLASS_LOAD_FAILURE
+};
+
+int do_PerflibHyperV(int update_every, usec_t dt);
#endif //NETDATA_WINDOWS_PLUGIN_H
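
Since `PERFLIB_PRIO` pins only its first member, every later member receives the next consecutive chart priority after 21000 (the 20xxx range being reserved for apps, per the comment above). A trivial illustration, with hypothetical names:

```c
// Illustrative only: C enums auto-increment from the last explicit value.
enum { FIRST = 21000, SECOND, THIRD };   // SECOND == 21001, THIRD == 21002
```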
diff --git a/src/collectors/xenstat.plugin/integrations/xen_xcp-ng.md b/src/collectors/xenstat.plugin/integrations/xen_xcp-ng.md
index 2aed4a06e..cd356202d 100644
--- a/src/collectors/xenstat.plugin/integrations/xen_xcp-ng.md
+++ b/src/collectors/xenstat.plugin/integrations/xen_xcp-ng.md
@@ -151,8 +151,8 @@ The file format is a modified INI syntax. The general structure is:
[section2]
option3 = some third value
```
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+You can edit the configuration file using the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#edit-a-configuration-file-using-edit-config) script from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/src/collectors/xenstat.plugin/xenstat_plugin.c b/src/collectors/xenstat.plugin/xenstat_plugin.c
index e4b8a2bd0..63592b6fd 100644
--- a/src/collectors/xenstat.plugin/xenstat_plugin.c
+++ b/src/collectors/xenstat.plugin/xenstat_plugin.c
@@ -920,8 +920,6 @@ static void xenstat_send_domain_metrics() {
}
int main(int argc, char **argv) {
- clocks_init();
-
// ------------------------------------------------------------------------
// initialization of netdata plugin
@@ -1022,12 +1020,11 @@ int main(int argc, char **argv) {
time_t started_t = now_monotonic_sec();
size_t iteration;
- usec_t step = netdata_update_every * USEC_PER_SEC;
heartbeat_t hb;
- heartbeat_init(&hb);
+ heartbeat_init(&hb, netdata_update_every * USEC_PER_SEC);
for(iteration = 0; 1; iteration++) {
- usec_t dt = heartbeat_next(&hb, step);
+ usec_t dt = heartbeat_next(&hb);
if(unlikely(netdata_exit)) break;